diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/create.py b/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/create.py deleted file mode 100644 index c968c162dbd4e44e4f29e5dfbf270c28963cb97b..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/create.py +++ /dev/null @@ -1,64 +0,0 @@ -from json import dumps -# from mail import MailClient -from re import findall - -from requests import post, get - -html = get('https://developermail.com/mail/') -print(html.cookies.get('mailboxId')) -email = findall(r'mailto:(.*)">', html.text)[0] - -headers = { - 'apikey': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InVzanNtdWZ1emRjcnJjZXVobnlqIiwicm9sZSI6ImFub24iLCJpYXQiOjE2NzgyODYyMzYsImV4cCI6MTk5Mzg2MjIzNn0.2MQ9Lkh-gPqQwV08inIgqozfbYm5jdYWtf-rn-wfQ7U', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36', - 'x-client-info': '@supabase/auth-helpers-nextjs@0.5.6', -} - -json_data = { - 'email': email, - 'password': 'T4xyt4Yn6WWQ4NC', - 'data': {}, - 'gotrue_meta_security': {}, -} - -response = post('https://usjsmufuzdcrrceuhnyj.supabase.co/auth/v1/signup', headers=headers, json=json_data) -print(response.json()) - -# email_link = None -# while not email_link: -# sleep(1) - -# mails = mailbox.getmails() -# print(mails) - - -quit() - -url = input("Enter the url: ") -response = get(url, allow_redirects=False) - -# https://openprompt.co/#access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8&expires_in=604800&refresh_token=_Zp8uXIA2InTDKYgo8TCqA&token_type=bearer&type=signup - -redirect = response.headers.get('location') -access_token = redirect.split('&')[0].split('=')[1] -refresh_token = redirect.split('&')[2].split('=')[1] - -supabase_auth_token = dumps([access_token, refresh_token, None, None, None], separators=(',', ':')) -print(supabase_auth_token) - -cookies = { - 'supabase-auth-token': supabase_auth_token -} - -json_data = { - 'messages': [ - { - 'role': 'user', - 'content': 'how do I reverse a string in python?' - } - ] -} - -response = post('https://openprompt.co/api/chat2', cookies=cookies, json=json_data, stream=True) -for chunk in response.iter_content(chunk_size=1024): - print(chunk) diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/The Pirate Bay Fallout 4 Codex !FREE! Crack.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/The Pirate Bay Fallout 4 Codex !FREE! Crack.md deleted file mode 100644 index 6a3311fbc16f42c0a2cd75c8456ada4b6418aeb8..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/The Pirate Bay Fallout 4 Codex !FREE! Crack.md +++ /dev/null @@ -1,94 +0,0 @@ -## The Pirate Bay Fallout 4 Codex Crack - - - - - - ![The Pirate Bay Fallout 4 Codex !FREE! 
Crack](https://eprosengenharia.com/wp-content/uploads/2016/05/incendio.jpg?w\u003d640) - - - - - -**Download File ✑ ✑ ✑ [https://www.google.com/url?q=https%3A%2F%2Fcinurl.com%2F2txKLp&sa=D&sntz=1&usg=AOvVaw0FnvQrPp9dpud-nHZVDtkS](https://www.google.com/url?q=https%3A%2F%2Fcinurl.com%2F2txKLp&sa=D&sntz=1&usg=AOvVaw0FnvQrPp9dpud-nHZVDtkS)** - - - - - - - - - - - - - -# The Pirate Bay Fallout 4 Codex Crack: How to Download and Install - - - -Fallout 4 is one of the most popular RPG games of all time, set in a post-apocalyptic world where you have to survive and rebuild civilization. However, the game is not cheap and requires a Steam account and a valid activation code to play. If you want to play Fallout 4 for free, you might be interested in downloading the Codex crack from The Pirate Bay, one of the most resilient bittorrent sites on the internet. - - - -The Codex crack is a modified version of the game that bypasses the Steam protection and allows you to play without a license key. It also includes all the updates and DLCs that have been released for Fallout 4, as well as some optional mods that enhance the gameplay. However, downloading and installing the Codex crack is not as simple as clicking a button. You need to follow some steps and precautions to make sure everything works properly. - - - -In this article, we will show you how to download and install the Codex crack for Fallout 4 from The Pirate Bay, as well as some tips and tricks to avoid any problems or errors. Please note that this article is for educational purposes only and we do not condone piracy or illegal downloading of any kind. You should always support the developers and publishers of the games you enjoy by buying them legally. - - - -## Step 1: Download a Torrent Client - - - -The first thing you need to do is to download a torrent client, which is a software that allows you to download files from bittorrent networks. There are many torrent clients available online, but some of the most popular ones are uTorrent, BitTorrent, qBittorrent, and Vuze. You can choose any of them, but make sure you download them from their official websites and not from third-party sources that might contain malware or viruses. - - - -Once you have downloaded and installed your torrent client of choice, you need to configure it properly to ensure optimal performance and security. Some of the settings you should check are: - - - -- Limit your upload and download speed according to your internet connection. - -- Enable encryption to protect your traffic from being monitored or throttled by your ISP. - -- Use a VPN or a proxy to hide your IP address and location from other peers and trackers. - -- Disable DHT, PEX, and LPD to avoid connecting to unwanted or malicious peers. - -- Choose a port that is not commonly used by other applications or blocked by firewalls. - - - -## Step 2: Download the Codex Crack from The Pirate Bay - - - -The next step is to download the Codex crack for Fallout 4 from The Pirate Bay. To do this, you need to visit the official website of The Pirate Bay, which might be blocked or censored in some countries. If that is the case, you can use a proxy site or a mirror site that has a different domain name but accesses the same content as The Pirate Bay. - - - -Once you are on The Pirate Bay website, you need to search for "Fallout 4 Codex" in the search bar. You will see a list of results that match your query, but not all of them are reliable or safe. 
You need to look for some indicators that can help you identify the best torrent to download. Some of these indicators are: - - - -- The number of seeders and leechers: Seeders are users who have the complete file and are sharing it with others. Leechers are users who are downloading the file but have not completed it yet. The more seeders and leechers a torrent has, the faster and more stable the download will be. - -- The comments and ratings: Comments and ratings are feedback from other users who have downloaded the torrent before. They can tell you if the torrent is working properly, if it has any errors or viruses, if it has good quality or not, etc. You should always read the comments and ratings before downloading any torrent. - -- The uploader's name and reputation: The uploader's name and reputation are indicators of how trustworthy and reliable they are. You should look for uploaders who have a green or purple skull icon next to their name, which means they are VIP or trusted users who have uploaded many torrents without any issues. - - - -Based on these indicators, we recommend downloading the torrent with index "1" from The - - 1b8d091108 - - - - - diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/CorelDRAW X8 How to Unlock the Full Potential of this Graphics Design Software.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/CorelDRAW X8 How to Unlock the Full Potential of this Graphics Design Software.md deleted file mode 100644 index 5b0ddb29f5497210d9f6ad5a80e9f1ccfb4134bf..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/CorelDRAW X8 How to Unlock the Full Potential of this Graphics Design Software.md +++ /dev/null @@ -1,43 +0,0 @@ - -

How to Download CorelDRAW X8 Full Version with Serial Number

-

If you are looking for a powerful and versatile graphics design software, you might want to try CorelDRAW X8. This software can help you create vector illustrations, layouts, photo editing, typography, and more. However, to use this software, you need to have a valid serial number that can activate the full version. In this article, we will show you how to download CorelDRAW X8 full version with serial number for free.

-

What is CorelDRAW X8?

-

CorelDRAW X8 is the 18th version of the CorelDRAW Graphics Suite, which was released in 2016. It is a software package that includes several applications for different design tasks, such as:

-



Download File: https://byltly.com/2uKyzj



- -

Some of the features of CorelDRAW X8 are:

- -

How to Download CorelDRAW X8 Full Version with Serial Number?

-

To download CorelDRAW X8 full version with serial number, you need to follow these steps:

-
    -
  1. Go to the official website of CorelDRAW and click on the "Free Download" button.
  2. -
  3. Select your operating system (Windows or Mac) and your language.
  4. -
  5. Enter your email address and click on "Download Now". You will receive an email with a download link and instructions.
  6. -
  7. Click on the download link and save the file on your computer.
  8. -
  9. Run the installer and follow the on-screen instructions. You will need to enter the serial number that was sent to your email.
  10. -
  11. After the installation is complete, you can launch CorelDRAW X8 and enjoy its features.
  12. -
- -

Note: The free download is a trial version that will expire after 15 days. To continue using CorelDRAW X8, you will need to purchase a license or subscription from the official website. Alternatively, you can use a keygen to generate a serial number for free, but this is not recommended as it may be illegal or unsafe.

- -

Conclusion

- -

CorelDRAW X8 is a great graphics design software that can help you create stunning projects for personal or professional use. However, to use it fully, you need to have a valid serial number that can activate it. You can either download a free trial version from the official website or use a keygen to generate a serial number for free. However, we advise you to purchase a license or subscription from the official website to support the developers and get access to updates and support.

-

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kitserver Winning Eleven 8 Master.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kitserver Winning Eleven 8 Master.md deleted file mode 100644 index a442cf822b5f96423818d7b2256a2141baa3e4ca..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kitserver Winning Eleven 8 Master.md +++ /dev/null @@ -1,28 +0,0 @@ - -

How to Download and Install Kitserver for Winning Eleven 8 Master

-

Kitserver is a popular add-on program for Winning Eleven 8 Master, also known as Pro Evolution Soccer 4, that allows you to customize various aspects of the game, such as kits, balls, stadiums, faces, and more. In this article, we will show you how to download and install Kitserver for Winning Eleven 8 Master on your PC.

-

Step 1: Download Kitserver

-

You can download Kitserver from the official GitHub repository: https://github.com/kitserver/kitserver4. Click on the green "Code" button and choose "Download ZIP". Save the file to your preferred location on your computer.

-



Download ✔✔✔ https://byltly.com/2uKyDd



-

Step 2: Extract Kitserver

-

After downloading Kitserver, you need to extract the ZIP file using a program like WinRAR or 7-Zip. You should see a folder called "kitserver4-master". Open it and you will find another folder called "kitserver". This is the folder that you need to copy to your Winning Eleven 8 Master installation directory.

-

Step 3: Install Kitserver

-

Go to your Winning Eleven 8 Master installation directory, which is usually located at C:\Program Files\KONAMI\Winning Eleven 8I. Paste the "kitserver" folder that you copied in the previous step. Your directory structure should look like this:

- -

Now, go to the "kitserver" folder and run setup.exe. You should see your "PES4.exe" in the dropdown list. If Kitserver hasn't been already installed for this executable, the "Install" button should become enabled. Press "Install" button. The installation should happen pretty quickly - in a matter of seconds. Once it is complete, the popup window will display "SUCCESS!" message, or report an error if one occurred. If an error occurs, check if your PES4.exe is not currently in use (i.e. exit the game, if it is currently running). Also, check that PES4.exe is not marked as read-only file.

-

Step 4: Use Kitserver

-

Congratulations! You have successfully installed Kitserver for Winning Eleven 8 Master. Now you can use it to enhance your game experience. To use Kitserver, you need to place additional folders in the "kitserver" folder. Each folder corresponds to a certain team, ball, stadium, face, or other mod. You can find many mods online from various sources, such as https://www.gamefront.com/games/winning-eleven-8. Make sure to follow the instructions provided by each mod creator on how to install and use their mods.

-

To activate Kitserver in-game, you need to use configurable hot-keys that are defined in the "config.txt" file in the "kitserver" folder. For example, by default, you can press F1 and F2 keys to cycle through different kits for Home and Away teams (for players and goalkeepers), and F3 key to cycle through different balls. You can also press F12 key to see which mods are currently loaded by Kitserver.

-

-

For more information on how to use Kitserver, you can read the manual here: http://kitserver.mapote.com/ks7/manual.html.

-

Step 5: Uninstall Kitserver

-

If you want to uninstall Kitserver from your Winning Eleven 8 Master game, you can do so by launching the setup.exe again, selecting PES4.exe, and pressing "Remove" button. After that, you can safely delete the whole "kitserver" folder from your game directory.

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Electronic Devices and Circuits by Bogart PDF Free Download The Best Book for Electronics Enthusiasts.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Electronic Devices and Circuits by Bogart PDF Free Download The Best Book for Electronics Enthusiasts.md deleted file mode 100644 index 7d0dbc6c91ed42c5e5001f01fb187a9ebc3336e2..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Electronic Devices and Circuits by Bogart PDF Free Download The Best Book for Electronics Enthusiasts.md +++ /dev/null @@ -1,106 +0,0 @@ -
-

Electronic Devices and Circuits by Bogart PDF Free Download

-

If you are looking for a comprehensive and up-to-date textbook on electronic devices and circuits, you might want to check out Electronic Devices and Circuits by Theodore F. Bogart. This book covers a wide range of topics in modern industrial applications and emerging technologies, using a structured, systems approach. In this article, we will give you an overview of the book, its features, and how you can download it for free.

-

Introduction

-

What are electronic devices and circuits?

-

Electronic devices are components that can manipulate electric signals or currents, such as resistors, capacitors, diodes, transistors, LEDs, etc. Electronic circuits are combinations of electronic devices that perform specific functions, such as amplifiers, oscillators, filters, converters, etc. Electronic devices and circuits are essential for many fields of engineering and science, such as communications, computing, robotics, biomedical, aerospace, etc.

-



Download Zip: https://byltly.com/2uKyuz



-

Why study electronic devices and circuits?

-

Studying electronic devices and circuits can help you understand the principles and applications of electronics, which is a rapidly evolving and expanding field. You can learn how to design, analyze, and troubleshoot electronic systems using various tools and techniques. You can also explore the latest developments and innovations in electronic devices and circuits, such as nanoelectronics, optoelectronics, microelectromechanical systems (MEMS), etc.

-

Who is Theodore F. Bogart?

-

Theodore F. Bogart is a professor emeritus of electrical engineering at Pennsylvania State University. He has over 40 years of teaching experience in electronics and has authored or co-authored several textbooks on the subject. He has also received several awards for his excellence in teaching and research. He is the main author of Electronic Devices and Circuits, which was first published in 1993 and has been revised several times since then.

-

Features of the book

-

Structured, systems approach

-

The book uses a structured, systems approach to present electronic devices and circuits in a logical and coherent manner. It starts with the basic concepts of electronics, such as voltage, current, power, resistance, etc., and then introduces the various types of electronic devices and their characteristics. It then shows how these devices can be combined into circuits to perform different functions. It also explains how these circuits can be integrated into larger systems to achieve specific goals.

-

Modern, thorough treatment of topics

-

The book covers a wide range of topics in electronic devices and circuits that are relevant for modern industrial applications and emerging technologies. It includes topics such as semiconductor physics, diode models and applications, bipolar junction transistor (BJT) models and applications, field-effect transistor (FET) models and applications, digital logic circuits, analog-to-digital converters (ADCs), digital-to-analog converters (DACs), etc. It also updates the content with the latest information and examples from real-world situations.

-

Integrated circuit theory and design

-

The book provides extensive coverage of integrated circuit theory and design, which is an important aspect of electronic devices and circuits. It explains how electronic devices can be fabricated on a single chip using various processes and techniques. It also discusses how integrated circuits can be classified into different types based on their functions and complexity levels. It also covers analog and digital integrated circuit design principles and methods.

-

Operational amplifier theory and applications

-

The book devotes several chapters to operational amplifier theory and applications, which is another important aspect of electronic devices and circuits. It describes what an operational amplifier is, how it works, and what its characteristics are. It also shows how operational amplifiers can be used to implement various types of linear and nonlinear circuits, such as amplifiers, filters, comparators, oscillators, etc. It also illustrates how operational amplifiers can be integrated into larger systems to perform complex tasks.

-

bogart electronic devices and circuits pdf download
-free pdf of electronic devices and circuits by bogart
-electronic devices and circuits by bogart ebook download
-download electronic devices and circuits by bogart pdf free
-electronic devices and circuits by bogart 6th edition pdf free download
-bogart electronic devices and circuits book pdf free download
-electronic devices and circuits by bogart solution manual pdf free download
-electronic devices and circuits by bogart 5th edition pdf free download
-how to download electronic devices and circuits by bogart pdf for free
-electronic devices and circuits by bogart online pdf free download
-electronic devices and circuits by bogart 4th edition pdf free download
-electronic devices and circuits by bogart pdf free download google drive
-electronic devices and circuits by bogart 7th edition pdf free download
-electronic devices and circuits by bogart pdf free download quora
-electronic devices and circuits by bogart 3rd edition pdf free download
-best site to download electronic devices and circuits by bogart pdf free
-electronic devices and circuits by bogart lecture notes pdf free download
-electronic devices and circuits by bogart 2nd edition pdf free download
-electronic devices and circuits by bogart 8th edition pdf free download
-electronic devices and circuits by bogart pdf free download reddit
-where can I find electronic devices and circuits by bogart pdf free download
-electronic devices and circuits by bogart lab manual pdf free download
-electronic devices and circuits by bogart 9th edition pdf free download
-electronic devices and circuits by bogart mcq pdf free download
-is it legal to download electronic devices and circuits by bogart pdf for free
-electronic devices and circuits by bogart ppt slides pdf free download
-electronic devices and circuits by bogart objective questions pdf free download
-electronic devices and circuits by bogart previous year question papers pdf free download
-advantages of downloading electronic devices and circuits by bogart pdf for free
-reviews of electronic devices and circuits by bogart pdf free download
-alternatives to electronic devices and circuits by bogart pdf free download
-tips for downloading electronic devices and circuits by bogart pdf for free
-electronic devices and circuits by bogart syllabus pdf free download
-comparison of electronic devices and circuits by bogart with other books pdf free download
-summary of electronic devices and circuits by bogart pdf free download
-features of electronic devices and circuits by bogart pdf free download
-benefits of reading electronic devices and circuits by bogart pdf for free
-challenges of downloading electronic devices and circuits by bogart pdf for free
-examples of projects using electronic devices and circuits by bogart pdf for free
-testimonials of students who downloaded electronic devices and circuits by bogart pdf for free
-how to cite electronic devices and circuits by bogart pdf in your paper or report for free
-how to share electronic devices and circuits by bogart pdf with your friends or classmates for free
-how to print or convert electronic devices and circuits by bogart pdf to other formats for free
-how to access or view electronic devices and circuits by bogart pdf on different devices for free
-how to edit or annotate electronic devices and circuits by bogart pdf for your study or research for free
-how to learn or teach from electronic devices and circuits by bogart pdf for free
-how to use or apply the concepts from electronic devices and circuits by bogart in your projects or assignments for free

-

Specialized electronic devices and circuits

-

The book also covers some specialized electronic devices and circuits that are useful for specific purposes or emerging fields. It includes topics such as switching regulators, optoelectronics, MEMS, nanoelectronics, etc. It explains what these devices and circuits are, how they work, and what their advantages and disadvantages are. It also gives examples of their applications and challenges.

-

How to download the book for free?

-

Internet Archive

-

One way to download the book for free is to use the Internet Archive, which is a non-profit digital library that offers free access to millions of books, movies, music, software, etc. You can find the PDF version of Electronic Devices and Circuits by Bogart on this website. You can either read it online or download it to your device. You can also borrow it for 14 days if you create an account on the website.

-

Google Books

-

Another way to download the book for free is to use Google Books, which is a service that allows you to search and preview millions of books from various publishers and libraries. You can find a preview version of Electronic Devices and Circuits by Bogart on this website. You can read some pages of the book online or download them as PDF files. However, you may not be able to access the full content of the book unless you buy it or find it in a library.

-

Other sources

-

Besides these two websites, you may also find other sources that offer free downloads of Electronic Devices and Circuits by Bogart. However, you should be careful when using these sources as they may not be legal or safe. Some of them may contain viruses or malware that can harm your device or steal your personal information. Some of them may also violate the copyright laws or terms of service of the original publishers or authors. Therefore, you should always check the credibility and reliability of these sources before downloading anything from them.

-

Conclusion

-

In conclusion, Electronic Devices and Circuits by Bogart is a comprehensive and up-to-date textbook on electronic devices and circuits that covers a wide range of topics in modern industrial applications and emerging technologies. It uses a structured, systems approach to present electronic devices and circuits in a logical and coherent manner. It also provides extensive coverage of integrated circuit theory and design, operational amplifier theory and applications, and specialized electronic devices and circuits. If you want to download this book for free, you can use the Internet Archive or Google Books websites, or look for other sources that may offer free downloads. However, you should always be careful when using these sources as they may not be legal or safe.

-

FAQs

-
    -
  1. What is the difference between electronic devices and electrical devices?
  2. -

    An electronic device is a component that can manipulate electric signals or currents based on some logic or function. An electrical device is a component that can convert electric energy into other forms of energy or vice versa based on some physical principle.

    -
  3. What are some examples of electronic devices?
  4. -

    Some examples of electronic devices are resistors, capacitors, diodes, transistors, LEDs, LCDs, sensors, microcontrollers, etc.

    -
  5. What are some examples of electronic circuits?
  6. -

    Some examples of electronic circuits are amplifiers, oscillators, filters, converters, counters, adders, multiplexers, etc.

    -
  7. What are some benefits of studying electronic devices and circuits?
  8. -

    Some benefits of studying electronic devices and circuits are:

    - -
  9. What are some challenges of studying electronic devices and circuits?
  10. -

    Some challenges of studying electronic devices and circuits are:

    - -
-

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FSX ORBX VECTOR 1.51 The Ultimate Vector Data for Flight Simulator.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FSX ORBX VECTOR 1.51 The Ultimate Vector Data for Flight Simulator.md deleted file mode 100644 index 5fbd9b2b07777cd538d67dcea7a9503bc9302816..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FSX ORBX VECTOR 1.51 The Ultimate Vector Data for Flight Simulator.md +++ /dev/null @@ -1,149 +0,0 @@ - -

FSX ORBX VECTOR 1.51 Download for Computer

-

If you are a fan of flight simulation games, you might have heard of FSX ORBX VECTOR 1.51. This is a product that enhances the realism and accuracy of your virtual world by adding vector data to your scenery. In this article, we will explain what FSX ORBX VECTOR 1.51 is, how to download and install it on your computer, why you should use it, and how to get the most out of it.

-



DOWNLOAD 🔗 https://byltly.com/2uKy2Y



-

What is FSX ORBX VECTOR 1.51?

-

FSX ORBX VECTOR 1.51 is a product developed by ORBX, a company that specializes in creating high-quality scenery addons for flight simulation games. FSX ORBX VECTOR 1.51 is designed for Microsoft Flight Simulator X (FSX) and Lockheed Martin Prepar3D (P3D), two of the most popular flight simulation platforms.

-

FSX ORBX VECTOR 1.51 adds vector data to your scenery, which means it improves the accuracy and detail of features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more. It also corrects some errors and anomalies in the default scenery, such as misplaced or missing features, unrealistic shapes or sizes, or incorrect elevations.

-

FSX ORBX VECTOR 1.51 covers the entire world with over 78 million square kilometers of vector data. It also includes a vector configuration tool that allows you to customize the settings and options of the product according to your preferences and system performance.

-

How to download and install FSX ORBX VECTOR 1.51 on your computer

-

If you are interested in using FSX ORBX VECTOR 1.51 on your computer, you will need to follow these steps:

-

Requirements and compatibility

-

Before you download and install FSX ORBX VECTOR 1.51, you need to make sure that your computer meets the minimum requirements and that you have a compatible flight simulation platform.

-

The minimum requirements for FSX ORBX VECTOR 1.51 are:

- -

The compatible flight simulation platforms are:

-

How to install FSX ORBX VECTOR 1.51 on P3D
-FSX ORBX VECTOR 1.51 review and comparison
-FSX ORBX VECTOR 1.51 download link and instructions
-FSX ORBX VECTOR 1.51 vs FTX Global Base
-FSX ORBX VECTOR 1.51 new features and improvements
-FSX ORBX VECTOR 1.51 system requirements and compatibility
-FSX ORBX VECTOR 1.51 best settings and tips
-FSX ORBX VECTOR 1.51 update and patch notes
-FSX ORBX VECTOR 1.51 free trial and demo
-FSX ORBX VECTOR 1.51 discount and coupon code
-FSX ORBX VECTOR 1.51 support and troubleshooting
-FSX ORBX VECTOR 1.51 scenery and airport list
-FSX ORBX VECTOR 1.51 screenshots and videos
-FSX ORBX VECTOR 1.51 performance and FPS
-FSX ORBX VECTOR 1.51 bugs and issues
-FSX ORBX VECTOR 1.51 alternatives and competitors
-FSX ORBX VECTOR 1.51 addons and mods
-FSX ORBX VECTOR 1.51 forum and community
-FSX ORBX VECTOR 1.51 license and activation
-FSX ORBX VECTOR 1.51 refund and cancellation policy
-How to uninstall FSX ORBX VECTOR 1.51 from your computer
-How to backup and restore FSX ORBX VECTOR 1.51 data
-How to upgrade from previous versions of FSX ORBX VECTOR
-How to customize and tweak FSX ORBX VECTOR 1.51 settings
-How to fix common errors and problems with FSX ORBX VECTOR 1.51
-How to optimize your PC for running FSX ORBX VECTOR 1.51 smoothly
-How to use FSX ORBX VECTOR 1.51 with other flight simulators
-How to get the most out of FSX ORBX VECTOR 1.51 features
-How to fly with realistic weather and traffic using FSX ORBX VECTOR 1.51
-How to create your own scenery and airports with FSX ORBX VECTOR 1.51 tools
-What are the benefits of using FSX ORBX VECTOR 1.51 for your flight simulation
-What are the drawbacks and limitations of using FSX ORBX VECTOR 1.51 for your flight simulation
-What are the differences between FSX ORBX VECTOR 1.51 and other vector products for flight simulators
-What are the best sources and resources for learning more about FSX ORBX VECTOR 1.51
-What are the best practices and recommendations for using FSX ORBX VECTOR 1.51 effectively
-What are the latest news and updates on FSX ORBX VECTOR 1.51 development and release
-What are the best places and websites to buy or download FSX ORBX VECTOR 1.51 legally and safely
-What are the best ways and methods to test and evaluate FSX ORBX VECTOR 1.51 quality and performance
-What are the best examples and showcases of using FSX ORBX VECTOR 1.51 for your flight simulation projects
-What are the best reviews and ratings of FSX ORBX VECTOR 1.51 by experts and users

- -

Steps to download and install

-

Once you have verified that your computer meets the requirements and that you have a compatible flight simulation platform, you can proceed to download and install FSX ORBX VECTOR 1.51.

-
    -
  1. Go to the official website of ORBX at https://orbxdirect.com/ and create an account if you don't have one already.
  2. -
  3. Browse the products section and find FSX ORBX VECTOR 1.51 under the Global Range category.
  4. -
  5. Add the product to your cart and proceed to checkout.
  6. -
  7. Complete the payment process using your preferred method.
  8. -
  9. Download the product using the ORBX Central application, which will be automatically installed on your computer after purchase.
  10. -
  11. Launch the ORBX Central application and select your flight simulation platform from the menu.
  12. -
  13. Select FSX ORBX VECTOR 1.51 from the list of products and click on Install.
  14. -
  15. Wait for the installation process to finish.
  16. -
  17. Launch your flight simulation platform and enjoy FSX ORBX VECTOR 1.51.
  18. -
-

Troubleshooting tips

-

If you encounter any issues or problems while downloading or installing FSX ORBX VECTOR 1.51, here are some tips that might help you:

- -

Why should you use FSX ORBX VECTOR 1.51?

-

Now that you know what FSX ORBX VECTOR 1.51 is and how to download and install it on your computer, you might be wondering why you should use it. What are the benefits of using FSX ORBX VECTOR 1.51 for your flight simulation experience?

-

Well, there are many reasons why FSX ORBX VECTOR 1.51 is a great product for flight simulation enthusiasts. Here are some of them:

-

The benefits of using FSX ORBX VECTOR 1.51 for your flight simulation experience

-

Enhanced realism and accuracy

-

One of the main benefits of using FSX ORBX VECTOR 1.51 is that it enhances the realism and accuracy of your virtual world. By adding vector data to your scenery, it makes your environment look more natural and authentic. You will be able to see features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more in their correct locations, shapes, sizes, and elevations.

-

This will make your flight simulation experience more immersive and enjoyable. You will be able to explore different regions and airports with more detail and variety. You will also be able to follow real-world navigation charts and procedures with more confidence and accuracy.

-

Improved performance and stability

-

Another benefit of using FSX ORBX VECTOR 1.51 is that it improves the performance and stability of your flight simulation platform. By using a smart compression technology, it reduces the size of the vector data files without compromising the quality. This means that it will not take up too much space on your hard disk or memory.

-

FSX ORBX VECTOR 1.51 also optimizes the loading and rendering of the vector data according to your system performance and settings. This means that it will not cause any significant impact on your frame rates or loading times. You will be able to enjoy a smooth and stable flight simulation experience without any lag or stutter.

-

Customizable settings and options

-

A third benefit of using FSX ORBX VECTOR 1.51 is that it offers customizable settings and options for your convenience and preference. By using the vector configuration tool that comes with the product, you will be able to adjust various aspects of the vector data according to your needs and desires.

-

For example, you will be able to enable or disable certain features such as roads, railways, bridges, power lines, golf courses, parks, and more. You will also be able to change the colors, widths, densities, and styles of these features to suit your taste. You will also be able to fine-tune the elevation correction settings to avoid any conflicts or errors with other scenery addons or mesh products.

-

How to get the most out of FSX ORBX VECTOR 1.51?

-

Finally, you might be wondering how to get the most out of FSX ORBX VECTOR 1.51. How can you optimize your FSX ORBX VECTOR 1.51 usage to enhance your flight simulation experience even further?

-

Well, there are some tips and tricks that you can follow to make the best use of FSX ORBX VECTOR 1.51. Here are some of them:

-

Some tips and tricks to optimize your FSX ORBX VECTOR 1.51 usage

-

Adjusting the vector configuration tool

-

As mentioned earlier, the vector configuration tool allows you to customize the settings and options of FSX ORBX VECTOR 1.51 according to your preferences and system performance. However, you should also be aware that some features might have more impact on your frame rates or loading times than others.

-

For example, roads and railways might have more impact than coastlines or rivers because they have more segments and curves. Therefore, you might want to reduce the width or density of these features if you have a lower-end system or if you want to improve your performance.

-

You should also experiment with different combinations of features and colors to find the best balance between realism and performance for your system and taste.

-

Using compatible scenery addons and mesh products

-

Another tip is to use compatible scenery addons and mesh products with FSX ORBX VECTOR 1.51 to enhance your virtual world even more. Scenery addons are products that add more detail and variety to specific regions or airports in your scenery. Mesh products are products that improve the elevation data of your terrain.

-

By using compatible scenery addons and mesh products with FSX ORBX VECTOR 1.51, you will be able to enjoy a more realistic and diverse environment with more features and landmarks. However, you should also make sure that these products are placed in the correct order in your scenery library to avoid any conflicts or errors.

-

The recommended order for placing these products in your scenery library is:

-
    -
  1. Your custom airports or regions
  2. -
  3. Your mesh products
  4. -
  5. Your landclass products
  6. -
  7. Your global base products
  8. -
  9. Your global vector products (FSX ORBX VECTOR 1.51)
  10. -
  11. Your default scenery
  12. -
-

Exploring different regions and airports with FSX ORBX VECTOR 1.51

-

A final tip is to explore different regions and airports with FSX ORBX VECTOR 1.51 to enjoy its full potential. By using FSX ORBX VECTOR 1.51, you will be able to see more detail and variety in your virtual world than ever before.

-

You will be able to discover new places and landmarks that you might have missed before. You will also be able to fly over different terrains and landscapes with more realism and accuracy.

-

You can use online resources such as Google Maps or Wikipedia to find interesting regions or airports to visit with FSX ORBX VECTOR 1.51. You can also use online flight planners such as SimBrief or SkyVector to plan realistic routes and procedures with FSX ORBX VECTOR 1.51.

-

Conclusion

-

Conclusion

-

In conclusion, FSX ORBX VECTOR 1.51 is a product that enhances the realism and accuracy of your virtual world by adding vector data to your scenery. It covers the entire world with over 78 million square kilometers of vector data that improves the detail and quality of features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more. It also corrects some errors and anomalies in the default scenery.

-

FSX ORBX VECTOR 1.51 also offers many benefits for your flight simulation experience. It improves the performance and stability of your flight simulation platform by using a smart compression technology and optimizing the loading and rendering of the vector data. It also offers customizable settings and options for your convenience and preference by using a vector configuration tool.

-

FSX ORBX VECTOR 1.51 is compatible with Microsoft Flight Simulator X (FSX) and Lockheed Martin Prepar3D (P3D), two of the most popular flight simulation platforms. It is easy to download and install on your computer by using the ORBX Central application. It also works well with other scenery addons and mesh products that are compatible with FSX ORBX VECTOR 1.51.

-

FSX ORBX VECTOR 1.51 is a great product for flight simulation enthusiasts who want to enhance their virtual world and enjoy a more immersive and enjoyable flight simulation experience. You can get FSX ORBX VECTOR 1.51 from the official website of ORBX at https://orbxdirect.com/ for $69.95 USD.

-

FAQs

-

Here are some frequently asked questions about FSX ORBX VECTOR 1.51:

-

What is the difference between FSX ORBX VECTOR 1.51 and FSX ORBX Global Base?

-

FSX ORBX Global Base is another product by ORBX that improves the texture and color of your terrain by replacing the default landclass data with high-resolution photorealistic data. FSX ORBX VECTOR 1.51 complements FSX ORBX Global Base by adding vector data to your scenery that improves the accuracy and detail of features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more.

-

Do I need FSX ORBX Global Base to use FSX ORBX VECTOR 1.51?

-

No, you do not need FSX ORBX Global Base to use FSX ORBX VECTOR 1.51. However, it is highly recommended that you use both products together to get the best results for your scenery.

-

Can I use FSX ORBX VECTOR 1.51 with other scenery addons or mesh products?

-

Yes, you can use FSX ORBX VECTOR 1.51 with other scenery addons or mesh products that are compatible with FSX ORBX VECTOR 1.51. However, you should make sure that these products are placed in the correct order in your scenery library to avoid any conflicts or errors.

-

How can I update FSX ORBX VECTOR 1.51 to the latest version?

-

You can update FSX ORBX VECTOR 1.51 to the latest version by using the ORBX Central application. Simply launch the application and select your flight simulation platform from the menu. Then select FSX ORBX VECTOR 1.51 from the list of products and click on Update.

-

How can I contact the ORBX support team if I have any questions or issues with FSX ORBX VECTOR 1.51?

-

You can contact the ORBX support team by visiting their website at https://orbxdirect.com/support. You can also join their online community at https://orbxsystems.com/forum/ where you can find helpful resources and interact with other users and developers.

-

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Counter-Strike 1.6 V40.1 NonSteam - DiGiTALZONE.rar.rar _HOT_.md b/spaces/1gistliPinn/ChatGPT4/Examples/Counter-Strike 1.6 V40.1 NonSteam - DiGiTALZONE.rar.rar _HOT_.md deleted file mode 100644 index 43f8821792a3e4d60f40fcdb31f3ed0ebed15467..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Counter-Strike 1.6 V40.1 NonSteam - DiGiTALZONE.rar.rar _HOT_.md +++ /dev/null @@ -1,6 +0,0 @@ -

Counter-Strike 1.6 V40.1 NonSteam - DiGiTALZONE.rar.rar


DOWNLOAD ->->->-> https://imgfil.com/2uxWXr



-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Flamingo 2 Rhino 5 Crack.md b/spaces/1gistliPinn/ChatGPT4/Examples/Flamingo 2 Rhino 5 Crack.md deleted file mode 100644 index a3ebe1f2f573eb6068944f0f1d394207e17c5ddb..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Flamingo 2 Rhino 5 Crack.md +++ /dev/null @@ -1,134 +0,0 @@ -

flamingo 2 rhino 5 crack


Download Ziphttps://imgfil.com/2uy07K



-
-
-
-

diff --git a/spaces/A00001/bingothoo/src/components/settings.tsx b/spaces/A00001/bingothoo/src/components/settings.tsx deleted file mode 100644 index 45ba6044ff9cbe584f62292a49ea2ace9acc1f48..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/components/settings.tsx +++ /dev/null @@ -1,157 +0,0 @@ -import { useEffect, useState } from 'react' -import { useAtom } from 'jotai' -import { Switch } from '@headlessui/react' -import { toast } from 'react-hot-toast' -import { hashAtom, voiceAtom } from '@/state' -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle -} from '@/components/ui/dialog' -import { Button } from './ui/button' -import { Input } from './ui/input' -import { ChunkKeys, parseCookies, extraCurlFromCookie, encodeHeadersToCookie, getCookie, setCookie } from '@/lib/utils' -import { ExternalLink } from './external-link' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - - -export function Settings() { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - const [loc, setLoc] = useAtom(hashAtom) - const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys))) - const [imageOnly, setImageOnly] = useState(getCookie('IMAGE_ONLY') !== '0') - const [enableTTS, setEnableTTS] = useAtom(voiceAtom) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - - if (loc === 'settings') { - return ( - setLoc('')} modal> - - - 设置你的用户信息 - - 请使用 Edge 浏览器 - - 打开并登录 Bing - - ,然后再打开 - Challenge 接口 - 右键 》检查。打开开发者工具,在网络里面找到 Create 接口 》右键复制》复制为 cURL(bash),粘贴到此处,然后保存。 -
- 图文示例: - 如何获取 BING_HEADER - - -
- -
- setCurlValue(e.target.value)} - /> -
- 身份信息仅用于画图(推荐) - setImageOnly(checked)} - > - - -
- - - - - - - -
- ) - } else if (loc === 'voice') { - return ( - setLoc('')} modal> - - - 语音设置 - - 目前仅支持 PC 端 Edge 及 Chrome 浏览器 - - - -
- 启用语音回答 - setEnableTTS(checked)} - > - - -
- - - - -
-
- ) - } - return null -} diff --git a/spaces/A00001/bingothoo/src/pages/api/image.ts b/spaces/A00001/bingothoo/src/pages/api/image.ts deleted file mode 100644 index fbc0c8def432ba212d27347471670d3b6202463d..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/pages/api/image.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' -import { createImage } from '@/lib/bots/bing/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const { prompt, id } = req.query - if (!prompt) { - return res.json({ - result: { - value: 'Image', - message: 'No Prompt' - } - }) - } - try { - const headers = createHeaders(req.cookies, { - IMAGE_BING_COOKIE: process.env.IMAGE_BING_COOKIE - }, 'image') - - debug('headers', headers) - const response = await createImage(String(prompt), String(id), { - ...headers, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - }) - res.writeHead(200, { - 'Content-Type': 'text/plain; charset=UTF-8', - }) - return res.end(response) - } catch (e) { - return res.json({ - result: { - value: 'Error', - message: `${e}` - } - }) - } -} diff --git a/spaces/AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary/README.md b/spaces/AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary/README.md deleted file mode 100644 index f5364d8a6e113372dbff7c7e912e15863432445e..0000000000000000000000000000000000000000 --- a/spaces/AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: AI.Dashboard.HEDIS.Terminology.Vocabulary.Codes -emoji: 😻 -colorFrom: red -colorTo: pink -sdk: static -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/tts/PortaSpeech.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/tts/PortaSpeech.py deleted file mode 100644 index 1e0222284a60f7de0142851ff7ad2bf26fb5ea76..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/tts/PortaSpeech.py +++ /dev/null @@ -1,85 +0,0 @@ -import torch -from inference.tts.base_tts_infer import BaseTTSInfer -from utils.ckpt_utils import load_ckpt -from modules.portaspeech.portaspeech import PortaSpeech - -class TTSInference(BaseTTSInfer): - def __init__(self, hparams, device=None): - super().__init__(hparams, device) - print("Initializing TTS model to %s" % device) - self.spk_map = self.preprocessor.load_spk_map(self.data_dir) - print("TTS loaded!") - - def build_model(self): - model = PortaSpeech(self.ph_encoder, self.word_encoder) - load_ckpt(model, self.hparams['work_dir'], 'model') - with torch.no_grad(): - model.store_inverse_all() - return model - - def forward_model(self, inp): - sample = self.input_to_batch(inp) - with torch.no_grad(): - output = self.model( - sample['txt_tokens'], - sample['word_tokens'], - ph2word=sample['ph2word'], - word_len=sample['word_lengths'].max(), - infer=True, - forward_post_glow=True, - spk_id=sample.get('spk_ids') - ) - mel_out = output['mel_out'] - wav_out = self.run_vocoder(mel_out) - wav_out = wav_out.cpu().numpy() - return wav_out[0] - - def preprocess_input(self, inp): - """ - - :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)} - :return: - """ - preprocessor, preprocess_args = self.preprocessor, 
self.preprocess_args - text_raw = inp['text'] - item_name = inp.get('item_name', '') - spk_name = inp.get('spk_name', '') - ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph( - preprocessor.txt_processor, text_raw, preprocess_args) - word_token = self.word_encoder.encode(word) - ph_token = self.ph_encoder.encode(ph) - spk_id = self.spk_map[spk_name] - item = {'item_name': item_name, 'text': txt, 'ph': ph, 'spk_id': spk_id, - 'ph_token': ph_token, 'word_token': word_token, 'ph2word': ph2word, - 'ph_words':ph_gb_word, 'words': word} - item['ph_len'] = len(item['ph_token']) - return item - - def input_to_batch(self, item): - item_names = [item['item_name']] - text = [item['text']] - ph = [item['ph']] - txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device) - txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device) - word_tokens = torch.LongTensor(item['word_token'])[None, :].to(self.device) - word_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device) - ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device) - spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device) - batch = { - 'item_name': item_names, - 'text': text, - 'ph': ph, - 'txt_tokens': txt_tokens, - 'txt_lengths': txt_lengths, - 'word_tokens': word_tokens, - 'word_lengths': word_lengths, - 'ph2word': ph2word, - 'spk_ids': spk_ids, - } - return batch - - def postprocess_output(self, output): - return output - - - diff --git a/spaces/AISuperheroes/03GR-Chatbot-Memory/app.py b/spaces/AISuperheroes/03GR-Chatbot-Memory/app.py deleted file mode 100644 index 81a521248e8f7cdad40078742a14e97db5f9cc8b..0000000000000000000000000000000000000000 --- a/spaces/AISuperheroes/03GR-Chatbot-Memory/app.py +++ /dev/null @@ -1,137 +0,0 @@ -from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration -import torch -import gradio as gr - - -# PersistDataset ----- -import os -import csv -import gradio as gr -from gradio import inputs, outputs -import huggingface_hub -from huggingface_hub import Repository, hf_hub_download, upload_file -from datetime import datetime -DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv" -DATASET_REPO_ID = "awacke1/Carddata.csv" -DATA_FILENAME = "Carddata.csv" -DATA_FILE = os.path.join("data", DATA_FILENAME) -HF_TOKEN = os.environ.get("HF_TOKEN") - -SCRIPT = """ - -""" - -try: - hf_hub_download( - repo_id=DATASET_REPO_ID, - filename=DATA_FILENAME, - cache_dir=DATA_DIRNAME, - force_filename=DATA_FILENAME - ) -except: - print("file not found") -repo = Repository( - local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -) - -def generate_html() -> str: - with open(DATA_FILE) as csvfile: - reader = csv.DictReader(csvfile) - rows = [] - for row in reader: - rows.append(row) - rows.reverse() - if len(rows) == 0: - return "no messages yet" - else: - html = "
" - for row in rows: - html += "
" - html += f"{row['inputs']}" - html += f"{row['outputs']}" - html += "
" - html += "
" - return html - -def store_message(name: str, message: str): - if name and message: - with open(DATA_FILE, "a") as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"]) - writer.writerow( - {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())} - ) - commit_url = repo.push_to_hub() - return "" - -iface = gr.Interface( - store_message, - [ - inputs.Textbox(placeholder="Your name"), - inputs.Textbox(placeholder="Your message", lines=2), - ], - "html", - css=""" - .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; } - """, - title="Reading/writing to a HuggingFace dataset repo from Spaces", - description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.", - article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})", -) - - -mname = "facebook/blenderbot-400M-distill" -model = BlenderbotForConditionalGeneration.from_pretrained(mname) -tokenizer = BlenderbotTokenizer.from_pretrained(mname) - -def take_last_tokens(inputs, note_history, history): - """Filter the last 128 tokens""" - if inputs['input_ids'].shape[1] > 128: - inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()]) - inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()]) - note_history = [' '.join(note_history[0].split(' ')[2:])] - history = history[1:] - return inputs, note_history, history - -def add_note_to_history(note, note_history): - """Add a note to the historical information""" - note_history.append(note) - note_history = ' '.join(note_history) - return [note_history] - -title = "Chatbot State of the Art now with Memory Saved to Dataset" -description = """Chatbot With Memory""" - -def chat(message, history): - history = history or [] - if history: - history_useful = [' '.join([str(a[0])+' '+str(a[1]) for a in history])] - else: - history_useful = [] - history_useful = add_note_to_history(message, history_useful) - inputs = tokenizer(history_useful, return_tensors="pt") - inputs, history_useful, history = take_last_tokens(inputs, history_useful, history) - reply_ids = model.generate(**inputs) - response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0] - history_useful = add_note_to_history(response, history_useful) - list_history = history_useful[0].split(' ') - history.append((list_history[-2], list_history[-1])) - store_message(message, response) # Save to dataset - return history, history - -gr.Interface( - fn=chat, - theme="huggingface", - css=".footer {display:none !important}", - inputs=["text", "state"], - outputs=["chatbot", "state"], - title=title, - allow_flagging="never", - description=f"Gradio chatbot backed by memory in a dataset repository.", - article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})" - ).launch() \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py deleted file mode 100644 index a4ea15984a0063c06e09eb5063d49b2cf90371cf..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py +++ /dev/null @@ -1,56 +0,0 @@ -_base_ = [ - '../_base_/models/resnet50.py', - 
'../_base_/datasets/imagenet_bs256_rsb_a12.py', - '../_base_/schedules/imagenet_bs2048_rsb.py', - '../_base_/default_runtime.py' -] - -# model settings -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - drop_path_rate=0.05, - ), - head=dict( - loss=dict( - type='LabelSmoothLoss', - label_smooth_val=0.1, - mode='original', - use_sigmoid=True, - )), - train_cfg=dict(augments=[ - dict(type='Mixup', alpha=0.2), - dict(type='CutMix', alpha=1.0) - ]), -) - -# dataset settings -train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True)) - -# schedule settings -optim_wrapper = dict( - optimizer=dict(weight_decay=0.01), - paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), -) - -param_scheduler = [ - # warm up learning rate scheduler - dict( - type='LinearLR', - start_factor=0.0001, - by_epoch=True, - begin=0, - end=5, - # update by iter - convert_to_iter_based=True), - # main learning rate scheduler - dict( - type='CosineAnnealingLR', - T_max=595, - eta_min=1.0e-6, - by_epoch=True, - begin=5, - end=600) -] - -train_cfg = dict(by_epoch=True, max_epochs=600) diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py deleted file mode 100644 index df8edc0370400a3f3985c33bffae2d04afc55772..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../_base_/models/resnet50.py', - '../_base_/datasets/imagenet_bs256_rsb_a12.py', - '../_base_/schedules/imagenet_bs2048_rsb.py', - '../_base_/default_runtime.py' -] - -# model settings -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - drop_path_rate=0.05, - ), - head=dict(loss=dict(use_sigmoid=True)), - train_cfg=dict(augments=[ - dict(type='Mixup', alpha=0.1), - dict(type='CutMix', alpha=1.0) - ])) - -# dataset settings -train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True)) - -# schedule settings -optim_wrapper = dict( - paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) - -param_scheduler = [ - # warm up learning rate scheduler - dict( - type='LinearLR', - start_factor=0.0001, - by_epoch=True, - begin=0, - end=5, - # update by iter - convert_to_iter_based=True), - # main learning rate scheduler - dict( - type='CosineAnnealingLR', - T_max=295, - eta_min=1.0e-6, - by_epoch=True, - begin=5, - end=300) -] -train_cfg = dict(by_epoch=True, max_epochs=300) diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Radio.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Radio.js deleted file mode 100644 index af01a3584e9e5e330c2929b2dbc7b89875e21f8f..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Radio.js +++ /dev/null @@ -1,82 +0,0 @@ -import Base from '../base/Base.js'; -import { Circle, Lines } from '../utils/Geoms.js'; -import Yoyo from '../utils/Yoyo.js'; - -const Linear = Phaser.Math.Linear; -const ExpoIn = Phaser.Math.Easing.Expo.In; - -class Radio extends Base { - constructor(scene, config) { - super(scene, config); - this.type = 'rexSpinnerRadio'; - } - - buildShapes() { - this.addShape((new 
Circle()).setName('center')); - this.addShape((new Lines()).setName('arc0')); - this.addShape((new Lines()).setName('arc1')); - } - - updateShapes() { - var centerX = this.centerX; - var centerY = this.centerY; - var radius = this.radius; - var isSizeChanged = this.isSizeChanged; - - var centerRadius = (radius * 2) / 6; - var x = centerX - radius + centerRadius; - var y = centerY + radius - centerRadius; - - var shapes = this.getShapes(); - for (var i = 0, cnt = shapes.length; i < cnt; i++) { - var shape = shapes[i]; - - var t = (this.value + ((cnt - i) * 0.1)) % 1; - t = ExpoIn(Yoyo(t)); - - switch (shape.name) { - case 'center': - shape.fillStyle(this.color, Linear(0.25, 1, t)) - - if (isSizeChanged) { - shape - .setRadius(centerRadius) - .setCenterPosition(x, y); - } - break; - case 'arc0': - shape.fillStyle(this.color, Linear(0.25, 1, t)); - - if (isSizeChanged) { - var radius0 = centerRadius * 2, - radius1 = centerRadius * 3; - shape - .startAt(x, y - radius0) - .lineTo(x, y - radius1) - .setIterations(8).arc(x, y, radius1, 270, 360) - .lineTo(x + radius0, y) - .setIterations(6).arc(x, y, radius0, 360, 270, true) - .close(); - } - break; - case 'arc1': - shape.fillStyle(this.color, Linear(0.25, 1, t)); - - if (isSizeChanged) { - var radius0 = centerRadius * 4, - radius1 = centerRadius * 5; - shape - .startAt(x, y - radius0) - .lineTo(x, y - radius1) - .setIterations(8).arc(x, y, radius1, 270, 360) - .lineTo(x + radius0, y) - .setIterations(6).arc(x, y, radius0, 360, 270, true) - .close(); - } - break; - } - } - } -} - -export default Radio; \ No newline at end of file diff --git a/spaces/Akseluhr/whisper-sv-SE-auhr/app.py b/spaces/Akseluhr/whisper-sv-SE-auhr/app.py deleted file mode 100644 index 0ab0248e2b25ec930548982ad05a24e791fd932f..0000000000000000000000000000000000000000 --- a/spaces/Akseluhr/whisper-sv-SE-auhr/app.py +++ /dev/null @@ -1,47 +0,0 @@ -from transformers import pipeline -import gradio as gr -from pytube import YouTube -import os - -# Get model from my model repo -pipe = pipeline(model="Akseluhr/whisper-small-sv-SE-auhr-v2") - -def get_audio(url): - yt = YouTube(url) # Downloads yt video - video = yt.streams.filter(only_audio=True).first() # Gets the audio of the video - print(video) - out_file=video.download(output_path=".") # Write the stream to disk - base, ext = os.path.splitext(out_file) # Split the path - new_file = base+'.mp3' - os.rename(out_file, new_file) # Convert to .mp3 - audio_file = new_file - return audio_file - -def transcribe(rec=None, file=None, url=""): - if rec != None: - audio = rec - elif file != None: - audio = file - elif url != "": - audio = get_audio(url) - else: - return "Provide a recording or a file." 
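# Hedged aside, not part of the deleted Space file above: a minimal standalone
# sketch of the transformers ASR pipeline pattern that transcribe() relies on.
# The model id and the "audio.mp3" path are illustrative assumptions only.
from transformers import pipeline as _asr_pipeline

_asr = _asr_pipeline("automatic-speech-recognition", model="openai/whisper-small")
print(_asr("audio.mp3")["text"])  # the pipeline accepts a local audio file path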
- - text = pipe(audio)["text"] - return text - - -iface = gr.Interface( - fn=transcribe, - inputs=[ - gr.Audio(source="microphone", type="filepath", optional=True), - gr.Audio(source="upload", type="filepath", optional=True), - gr.Textbox(placeholder='Enter the Youtube video URL', label='URL', optional=True), - ], - outputs="text", - title="Whisper Small Swedish", - description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper model.", -) - - -iface.launch() \ No newline at end of file diff --git a/spaces/Alican/pixera/data/single_dataset.py b/spaces/Alican/pixera/data/single_dataset.py deleted file mode 100644 index 9a5c3232f2ff746e73eeb4a7775027796dd20969..0000000000000000000000000000000000000000 --- a/spaces/Alican/pixera/data/single_dataset.py +++ /dev/null @@ -1,40 +0,0 @@ -from data.base_dataset import BaseDataset, get_transform -from data.image_folder import make_dataset -from PIL import Image - - -class SingleDataset(BaseDataset): - """This dataset class can load a set of images specified by the path --dataroot /path/to/data. - - It can be used for generating CycleGAN results only for one side with the model option '-model test'. - """ - - def __init__(self, opt): - """Initialize this dataset class. - - Parameters: - opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - BaseDataset.__init__(self, opt) - self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size)) - input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc - self.transform = get_transform(opt, grayscale=(input_nc == 1)) - - def __getitem__(self, index): - """Return a data point and its metadata information. - - Parameters: - index - - a random integer for data indexing - - Returns a dictionary that contains A and A_paths - A(tensor) - - an image in one domain - A_paths(str) - - the path of the image - """ - A_path = self.A_paths[index] - A_img = Image.open(A_path).convert('RGB') - A = self.transform(A_img) - return {'A': A, 'A_paths': A_path} - - def __len__(self): - """Return the total number of images in the dataset.""" - return len(self.A_paths) diff --git a/spaces/AmirTrader/LinearRegression/app.py b/spaces/AmirTrader/LinearRegression/app.py deleted file mode 100644 index 729aa78a8c96b2504a054b5285bee59b2a49a4e7..0000000000000000000000000000000000000000 --- a/spaces/AmirTrader/LinearRegression/app.py +++ /dev/null @@ -1,221 +0,0 @@ - -import pandas as pd -import panel as pn -from datetime import datetime -from datetime import date -pn.extension('bokeh', template='bootstrap') -import hvplot.pandas - -import pandas as pd -import yfinance as yf -import panel as pn - -@pn.cache -def get_df(ticker, startdate , enddate , interval="1d",window=50,window2=150): - # interval="1d" - # get_df(ticker ="PG", startdate="2000-01-01" , enddate="2023-09-01" , interval="1d") - DF = yf.Ticker(ticker).history(start=startdate,end=enddate,interval=interval) - DF['SMA'] = DF.Close.rolling(window=window).mean() - DF['SMA2'] = DF.Close.rolling(window=window2).mean() - DF = DF.reset_index() - return DF - -def get_hvplot(ticker , startdate , enddate , interval,window,window2): - DF = get_df(ticker , startdate=startdate , enddate=enddate , interval=interval,window=window,window2=window2) - - import hvplot.pandas # Ensure hvplot is installed (pip install hvplot) - from sklearn.linear_model import LinearRegression - import holoviews as hv - hv.extension('bokeh') - # Assuming your dataframe is named 'df' with columns 'Date' and 'Close' - 
# If not, replace 'Date' and 'Close' with your actual column names. - - # Step 1: Create a scatter plot using hvplot - scatter_plot = DF.hvplot(x='Date', y='Close', kind='scatter',title=f'{ticker} Close vs. Date') - - # Step 2: Fit a linear regression model - DF['Date2'] = pd.to_numeric(DF['Date']) - X = DF[['Date2']] - y = DF[['Close']] #.values - model = LinearRegression().fit(X, y) - - # # Step 3: Predict using the linear regression model - DF['Predicted_Close'] = model.predict(X) - - # # Step 4: Create a line plot for linear regression - line_plot = DF.hvplot(x='Date', y='Predicted_Close', kind='line',line_dash='dashed', color='red') - line_plot_SMA = DF.hvplot(x='Date', y='SMA', kind='line',line_dash='dashed', color='orange') - line_plot_SMA2 = DF.hvplot(x='Date', y='SMA2', kind='line',line_dash='dashed', color='orange') - - # # Step 5: Overlay scatter plot and linear regression line - # return (scatter_plot * line_plot).opts(width=800, height=600, show_grid=True, gridstyle={ 'grid_line_color': 'gray'}) - # grid_style = {'grid_line_color': 'black'}#, 'grid_line_width': 1.5, 'ygrid_bounds': (0.3, 0.7),'minor_xgrid_line_color': 'lightgray', 'xgrid_line_dash': [4, 4]} - return (scatter_plot * line_plot *line_plot_SMA *line_plot_SMA2).opts(width=800, height=600, show_grid=True) -def get_income_statement_df(ticker): - yfobj = yf.Ticker(ticker) - df= yfobj.financials.T - df.index = pd.to_datetime(df.index, format='%Y-%m-%d') - return df - -def get_income_hvplot(ticker): - DF = get_income_statement_df(ticker) - plt1 = DF.hvplot.line(y='Total Revenue') * DF.hvplot.scatter(y='Total Revenue').opts(color="red") - plt1.opts(width=600, height=450, show_grid=True) - plt2 = DF.hvplot.line(y='Gross Profit') * DF.hvplot.scatter(y='Gross Profit').opts(color="red") - plt2.opts(width=600, height=450, show_grid=True) - plt3 = DF.hvplot.line(y='Net Income') * DF.hvplot.scatter(y='Net Income').opts(color="red") - plt3.opts(width=600, height=450, show_grid=True) - return pn.Column(plt1 , plt2 , plt3 ) - # return ( DF.hvplot.line(y='Net Income') * DF.hvplot.scatter(y='Net Income').opts(color="red") )+ (DF.hvplot.line(y='Gross Profit') * DF.hvplot.scatter(y='Gross Profit').opts(color="red") )+ - # (DF.hvplot.line(y='Total Revenue') * DF.hvplot.scatter(y='Total Revenue').opts(color="red") ) - -def lookup_discountedrate(betavalue): - betavsdiscountedrate = {1: 5, 1: 6, 1.1: 6.5, 1.2: 7, 1.3: 7.5, 1.4: 8, 1.5: 8.5, 1.6: 9} - if betavalue < 1: - return betavsdiscountedrate[1] # Return the value for key 1 if key is below 1 - elif betavalue > 1.6: - return betavsdiscountedrate[1.6] # Return the value for key 1.6 if key is above 1.6 - else: - # Find the closest key to the given key - closest_key = min(betavsdiscountedrate.keys(), key=lambda x: abs(x - betavalue)) - - # Get the corresponding value - value = betavsdiscountedrate[closest_key] - - return value - - -def calc_fairprice_CDF(ticker): - import yfinance as yf - yfobj = yf.Ticker(ticker) - - #calculate eps growing next 5 years - EPSnext5Y = yfobj.get_info()['trailingPE'] / yfobj.get_info()['trailingPegRatio'] - - years = 10 - # - cashflowinitial = yfobj.get_info()['operatingCashflow'] - - cashflowlst=[] - cashflow = cashflowinitial - for i in range(1,years+1): - cashflow = cashflow*(1+EPSnext5Y/100) - cashflowlst.append(cashflow) - - try: - discountedrate = lookup_discountedrate(yfobj.get_info()['beta']) - except: - discountedrate = 5 - - discountedfactorlst =[] - discountedvaluelst=[] - discountedfactor =1 - - for i in range(1,years+1): - discountedfactor =( 1 
/ (1+ discountedrate/100)**i) - discountedfactorlst.append(discountedfactor) - discountedvalue = discountedfactor * cashflowlst[i-1] - discountedvaluelst.append(discountedvalue) - - PV10yearsCashFlow =0 - for i in range(0,years): - PV10yearsCashFlow += discountedvaluelst[i] - - #intrinsic value before cash/debt - intrinsicvaluebeforecashdebt = PV10yearsCashFlow / yfobj.get_info()['sharesOutstanding'] - - debtpershare = yfobj.get_info()['totalDebt'] / yfobj.get_info()['sharesOutstanding'] - cashpershare = yfobj.get_info()['totalCash'] / yfobj.get_info()['sharesOutstanding'] - intrinsicvalue = intrinsicvaluebeforecashdebt + cashpershare - debtpershare - - previousClose = yfobj.get_info()['previousClose'] - deviation = 100*(intrinsicvalue - previousClose) / previousClose - # return intrinsicvalue , previousClose , deviation - return pn.Row(pn.widgets.StaticText(name='fairprice_CDF', value=str(round(intrinsicvalue,1))) ,pn.widgets.StaticText(name='deviation', value=str(round(deviation,2))) ) - - -def calc_fairprice_DnetP(ticker): - import yfinance as yf - yfobj = yf.Ticker(ticker) - - #calculate eps growing next 5 years - EPSnext5Y = yfobj.get_info()['trailingPE'] / yfobj.get_info()['trailingPegRatio'] - - years = 5 - # - cashflowinitial = yfobj.get_info()['netIncomeToCommon'] - - cashflowlst=[] - cashflow = cashflowinitial - for i in range(1,years+1): - cashflow = cashflow*(1+EPSnext5Y/100) - cashflowlst.append(cashflow) - - try: - discountedrate = lookup_discountedrate(yfobj.get_info()['beta']) - except: - discountedrate = 5 - - discountedfactorlst =[] - discountedvaluelst=[] - discountedfactor =1 - - for i in range(1,years+1): - discountedfactor =( 1 / (1+ discountedrate/100)**i) - discountedfactorlst.append(discountedfactor) - discountedvalue = discountedfactor * cashflowlst[i-1] - discountedvaluelst.append(discountedvalue) - - PV10yearsCashFlow =0 - for i in range(0,years): - PV10yearsCashFlow += discountedvaluelst[i] - - #intrinsic value before cash/debt - intrinsicvaluebeforecashdebt = PV10yearsCashFlow / yfobj.get_info()['sharesOutstanding'] - - debtpershare = yfobj.get_info()['totalDebt'] / yfobj.get_info()['sharesOutstanding'] - cashpershare = yfobj.get_info()['totalCash'] / yfobj.get_info()['sharesOutstanding'] - intrinsicvalue = intrinsicvaluebeforecashdebt + cashpershare - debtpershare - - previousClose = yfobj.get_info()['previousClose'] - intrinsicvalue= intrinsicvalue + previousClose - - deviation = 100*(intrinsicvalue - previousClose) / previousClose - # return intrinsicvalue , previousClose , deviation - return pn.Row(pn.widgets.StaticText(name='fairprice_DnetP', value=str(round(intrinsicvalue,1))) , pn.widgets.StaticText(name='deviation', value=str(round(deviation,2))) ) - -# tickers = ['AAPL', 'META', 'GOOG', 'IBM', 'MSFT','NKE','DLTR','DG'] -# ticker = pn.widgets.Select(name='Ticker', options=tickers) - -tickers = pd.read_csv('tickers.csv').Ticker.to_list() -ticker = pn.widgets.AutocompleteInput(name='Ticker', options=tickers , placeholder='Write Ticker here همین جا') -ticker.value = "AAPL" -window = pn.widgets.IntSlider(name='Window Size', value=50, start=5, end=1000, step=5) -window2 = pn.widgets.IntSlider(name='Window Size2', value=150, start=5, end=1000, step=5) - -# Create a DatePicker widget with a minimum date of 2000-01-01 -date_start = pn.widgets.DatePicker( - name ="Start Date", - description='Select a Date', - start= date(2000, 1, 1) -) - -date_end = pn.widgets.DatePicker( - name ="End Date",# value=datetime(2000, 1, 1), - description='Select a Date', - end= 
date.today() #date(2023, 9, 1) -) - -date_start.value = date(2010,1,1) -date_end.value = date.today() - -pn.Row( - pn.Column( ticker, window , window2, date_start , date_end), - # pn.bind(calc_fairprice_CDF,ticker), - # pn.bind(calc_fairprice_DnetP,ticker)), - # pn.panel(pn.bind(get_hvplot, ticker, "2010-01-01","2023-09-01","1d")) #, sizing_mode='stretch_width') - pn.panel(pn.bind(get_hvplot, ticker, date_start , date_end,"1d",window,window2)), #, sizing_mode='stretch_width') - pn.panel(pn.bind(get_income_hvplot, ticker)) #, sizing_mode='stretch_width') -).servable(title="Under Valued Screener- Linear Regression") - - diff --git a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/python/dqn/dqn.py b/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/python/dqn/dqn.py deleted file mode 100644 index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000 --- a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/python/dqn/dqn.py +++ /dev/null @@ -1,245 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import numpy as np -import torch as th -from torch.nn import functional as F - -from stable_baselines3.common import logger -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.preprocessing import maybe_transpose -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update -from stable_baselines3.dqn.policies import DQNPolicy - - -class DQN(OffPolicyAlgorithm): - """ - Deep Q-Network (DQN) - - Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236 - Default hyperparameters are taken from the nature paper, - except for the optimizer and learning rate that were taken from Stable Baselines defaults. - - :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) - :param env: The environment to learn from (if registered in Gym, can be str) - :param learning_rate: The learning rate, it can be a function - of the current progress remaining (from 1 to 0) - :param buffer_size: size of the replay buffer - :param learning_starts: how many steps of the model to collect transitions for before learning starts - :param batch_size: Minibatch size for each gradient update - :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update - :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit - like ``(5, "step")`` or ``(2, "episode")``. - :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) - Set to ``-1`` means to do as many gradient steps as steps done in the environment - during the rollout. - :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer - at a cost of more complexity. - See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - :param target_update_interval: update the target network every ``target_update_interval`` - environment steps. 
- :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced - :param exploration_initial_eps: initial value of random action probability - :param exploration_final_eps: final value of random action probability - :param max_grad_norm: The maximum value for the gradient clipping - :param tensorboard_log: the log location for tensorboard (if None, no logging) - :param create_eval_env: Whether to create a second environment that will be - used for evaluating the agent periodically. (Only available when passing string for the environment) - :param policy_kwargs: additional arguments to be passed to the policy on creation - :param verbose: the verbosity level: 0 no output, 1 info, 2 debug - :param seed: Seed for the pseudo random generators - :param device: Device (cpu, cuda, ...) on which the code should be run. - Setting it to auto, the code will be run on the GPU if possible. - :param _init_setup_model: Whether or not to build the network at the creation of the instance - """ - - def __init__( - self, - policy: Union[str, Type[DQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, - learning_starts: int = 50000, - batch_size: Optional[int] = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 0, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): - - super(DQN, self).__init__( - policy, - env, - DQNPolicy, - learning_rate, - buffer_size, - learning_starts, - batch_size, - tau, - gamma, - train_freq, - gradient_steps, - action_noise=None, # No action noise - policy_kwargs=policy_kwargs, - tensorboard_log=tensorboard_log, - verbose=verbose, - device=device, - create_eval_env=create_eval_env, - seed=seed, - sde_support=False, - optimize_memory_usage=optimize_memory_usage, - supported_action_spaces=(gym.spaces.Discrete,), - ) - - self.exploration_initial_eps = exploration_initial_eps - self.exploration_final_eps = exploration_final_eps - self.exploration_fraction = exploration_fraction - self.target_update_interval = target_update_interval - self.max_grad_norm = max_grad_norm - # "epsilon" for the epsilon-greedy exploration - self.exploration_rate = 0.0 - # Linear schedule will be defined in `_setup_model()` - self.exploration_schedule = None - self.q_net, self.q_net_target = None, None - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - super(DQN, self)._setup_model() - self._create_aliases() - self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction - ) - - def _create_aliases(self) -> None: - self.q_net = self.policy.q_net - self.q_net_target = self.policy.q_net_target - - def _on_step(self) -> None: - """ - Update the exploration rate and target network if needed. - This method is called in ``collect_rollouts()`` after each step in the environment. 
- """ - if self.num_timesteps % self.target_update_interval == 0: - polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau) - - self.exploration_rate = self.exploration_schedule(self._current_progress_remaining) - logger.record("rollout/exploration rate", self.exploration_rate) - - def train(self, gradient_steps: int, batch_size: int = 100) -> None: - # Update learning rate according to schedule - self._update_learning_rate(self.policy.optimizer) - - losses = [] - for _ in range(gradient_steps): - # Sample replay buffer - replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) - - with th.no_grad(): - # Compute the next Q-values using the target network - next_q_values = self.q_net_target(replay_data.next_observations) - # Follow greedy policy: use the one with the highest value - next_q_values, _ = next_q_values.max(dim=1) - # Avoid potential broadcast issue - next_q_values = next_q_values.reshape(-1, 1) - # 1-step TD target - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates - current_q_values = self.q_net(replay_data.observations) - - # Retrieve the q-values for the actions from the replay buffer - current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) - - # Compute Huber loss (less sensitive to outliers) - loss = F.smooth_l1_loss(current_q_values, target_q_values) - losses.append(loss.item()) - - # Optimize the policy - self.policy.optimizer.zero_grad() - loss.backward() - # Clip gradient norm - th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) - self.policy.optimizer.step() - - # Increase update counter - self._n_updates += gradient_steps - - logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/loss", np.mean(losses)) - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """ - Overrides the base_class predict function to include epsilon-greedy exploration. - - :param observation: the input observation - :param state: The last states (can be None, used in recurrent policies) - :param mask: The last masks (can be None, used in recurrent policies) - :param deterministic: Whether or not to return deterministic actions. 
- :return: the model's action and the next state - (used in recurrent policies) - """ - if not deterministic and np.random.rand() < self.exploration_rate: - if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space): - n_batch = observation.shape[0] - action = np.array([self.action_space.sample() for _ in range(n_batch)]) - else: - action = np.array(self.action_space.sample()) - else: - action, state = self.policy.predict(observation, state, mask, deterministic) - return action, state - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "DQN", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> OffPolicyAlgorithm: - - return super(DQN, self).learn( - total_timesteps=total_timesteps, - callback=callback, - log_interval=log_interval, - eval_env=eval_env, - eval_freq=eval_freq, - n_eval_episodes=n_eval_episodes, - tb_log_name=tb_log_name, - eval_log_path=eval_log_path, - reset_num_timesteps=reset_num_timesteps, - ) - - def _excluded_save_params(self) -> List[str]: - return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"] - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - state_dicts = ["policy", "policy.optimizer"] - - return state_dicts, [] diff --git a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/opt.py b/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/opt.py deleted file mode 100644 index 9a655604370d2ebe345270930f705e3bd8d7a6ad..0000000000000000000000000000000000000000 --- a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/opt.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -Modified from https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.4/tools/program.py -""" -from typing import Optional -from argparse import ArgumentParser, RawDescriptionHelpFormatter -import yaml -import json -from src.utils.loading import load_yaml - - -class Config(dict): - """Single level attribute dict, NOT recursive""" - - def __init__(self, yaml_path): - super(Config, self).__init__() - - config = load_yaml(yaml_path) - super(Config, self).update(config) - - def __getattr__(self, key): - if key in self: - return self[key] - raise AttributeError("object has no attribute '{}'".format(key)) - - def save_yaml(self, path): - print(f"Saving config to {path}...") - with open(path, "w") as f: - yaml.dump(dict(self), f, default_flow_style=False, sort_keys=False) - - @classmethod - def load_yaml(cls, path): - print(f"Loading config from {path}...") - return cls(path) - - def __repr__(self) -> str: - return str(json.dumps(dict(self), sort_keys=False, indent=4)) - - -class Opts(ArgumentParser): - def __init__(self, cfg: Optional[str] = None): - super(Opts, self).__init__(formatter_class=RawDescriptionHelpFormatter) - self.add_argument( - "-c", "--config", default=cfg, help="configuration file to use" - ) - self.add_argument( - "-o", "--opt", nargs="+", help="override configuration options" - ) - - def parse_args(self, argv=None): - args = super(Opts, self).parse_args(argv) - assert args.config is not None, "Please specify --config=configure_file_path." 
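# Hedged aside, not part of the deleted file: a self-contained sketch of the
# dotted "-o key.subkey=value" override idea that Opts._parse_opt/override
# implement below. The config keys and values here are made-up examples.
import yaml as _yaml

def _apply_override(cfg: dict, dotted_key: str, raw_value: str) -> None:
    value = _yaml.safe_load(raw_value)   # "0.001" -> 0.001, "true" -> True
    *parents, leaf = dotted_key.split(".")
    node = cfg
    for part in parents:                 # walk down to the parent dict
        node = node[part]
    node[leaf] = value                   # set the leaf value

_cfg = {"model": {"lr": 0.01}, "data": {"batch_size": 4}}
_apply_override(_cfg, "model.lr", "0.001")
assert _cfg["model"]["lr"] == 0.001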
- args.opt = self._parse_opt(args.opt) - - config = Config(args.config) - config = self.override(config, args.opt) - return config - - def _parse_opt(self, opts): - config = {} - if not opts: - return config - for s in opts: - s = s.strip() - k, v = s.split("=") - config[k] = yaml.load(v, Loader=yaml.Loader) - return config - - def override(self, global_config, overriden): - """ - Merge config into global config. - Args: - config (dict): Config to be merged. - Returns: global config - """ - print("Overriding configurating") - for key, value in overriden.items(): - if "." not in key: - if isinstance(value, dict) and key in global_config: - global_config[key].update(value) - else: - if key in global_config.keys(): - global_config[key] = value - print(f"'{key}' not found in config") - else: - sub_keys = key.split(".") - assert ( - sub_keys[0] in global_config - ), "the sub_keys can only be one of global_config: {}, but get: {}, please check your running command".format( - global_config.keys(), sub_keys[0] - ) - cur = global_config[sub_keys[0]] - for idx, sub_key in enumerate(sub_keys[1:]): - if idx == len(sub_keys) - 2: - if sub_key in cur.keys(): - cur[sub_key] = value - else: - print(f"'{key}' not found in config") - else: - cur = cur[sub_key] - return global_config diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/sabl_retina_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/sabl_retina_head.py deleted file mode 100644 index 4211622cb8b4fe807230a89bcaab8f4f1681bfc0..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/sabl_retina_head.py +++ /dev/null @@ -1,621 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import (build_anchor_generator, build_assigner, - build_bbox_coder, build_sampler, images_to_levels, - multi_apply, multiclass_nms, unmap) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .guided_anchor_head import GuidedAnchorHead - - -@HEADS.register_module() -class SABLRetinaHead(BaseDenseHead): - """Side-Aware Boundary Localization (SABL) for RetinaNet. - - The anchor generation, assigning and sampling in SABLRetinaHead - are the same as GuidedAnchorHead for guided anchoring. - - Please refer to https://arxiv.org/abs/1912.04260 for more details. - - Args: - num_classes (int): Number of classes. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): Number of Convs for classification \ - and regression branches. Defaults to 4. - feat_channels (int): Number of hidden channels. \ - Defaults to 256. - approx_anchor_generator (dict): Config dict for approx generator. - square_anchor_generator (dict): Config dict for square generator. - conv_cfg (dict): Config dict for ConvModule. Defaults to None. - norm_cfg (dict): Config dict for Norm Layer. Defaults to None. - bbox_coder (dict): Config dict for bbox coder. - reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. It should be `True` when - using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. - train_cfg (dict): Training config of SABLRetinaHead. - test_cfg (dict): Testing config of SABLRetinaHead. 
- loss_cls (dict): Config of classification loss. - loss_bbox_cls (dict): Config of classification loss for bbox branch. - loss_bbox_reg (dict): Config of regression loss for bbox branch. - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - conv_cfg=None, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', - num_buckets=14, - scale_factor=3.0), - reg_decoded_bbox=False, - train_cfg=None, - test_cfg=None, - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)): - super(SABLRetinaHead, self).__init__() - self.in_channels = in_channels - self.num_classes = num_classes - self.feat_channels = feat_channels - self.num_buckets = bbox_coder['num_buckets'] - self.side_num = int(np.ceil(self.num_buckets / 2)) - - assert (approx_anchor_generator['octave_base_scale'] == - square_anchor_generator['scales'][0]) - assert (approx_anchor_generator['strides'] == - square_anchor_generator['strides']) - - self.approx_anchor_generator = build_anchor_generator( - approx_anchor_generator) - self.square_anchor_generator = build_anchor_generator( - square_anchor_generator) - self.approxs_per_octave = ( - self.approx_anchor_generator.num_base_anchors[0]) - - # one anchor per location - self.num_anchors = 1 - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.reg_decoded_bbox = reg_decoded_bbox - - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - self.sampling = loss_cls['type'] not in [ - 'FocalLoss', 'GHMC', 'QualityFocalLoss' - ] - if self.use_sigmoid_cls: - self.cls_out_channels = num_classes - else: - self.cls_out_channels = num_classes + 1 - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox_cls = build_loss(loss_bbox_cls) - self.loss_bbox_reg = build_loss(loss_bbox_reg) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # use PseudoSampler when sampling is False - if self.sampling and hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.fp16_enabled = False - self._init_layers() - - def _init_layers(self): - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.retina_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.retina_bbox_reg = nn.Conv2d( - self.feat_channels, self.side_num * 4, 3, padding=1) - self.retina_bbox_cls = nn.Conv2d( - 
self.feat_channels, self.side_num * 4, 3, padding=1) - - def init_weights(self): - for m in self.cls_convs: - normal_init(m.conv, std=0.01) - for m in self.reg_convs: - normal_init(m.conv, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.retina_cls, std=0.01, bias=bias_cls) - normal_init(self.retina_bbox_reg, std=0.01) - normal_init(self.retina_bbox_cls, std=0.01) - - def forward_single(self, x): - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.retina_cls(cls_feat) - bbox_cls_pred = self.retina_bbox_cls(reg_feat) - bbox_reg_pred = self.retina_bbox_reg(reg_feat) - bbox_pred = (bbox_cls_pred, bbox_reg_pred) - return cls_score, bbox_pred - - def forward(self, feats): - return multi_apply(self.forward_single, feats) - - def get_anchors(self, featmap_sizes, img_metas, device='cuda'): - """Get squares according to feature map sizes and guided anchors. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - device (torch.device | str): device for returned tensors - - Returns: - tuple: square approxs of each image - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # squares for one time - multi_level_squares = self.square_anchor_generator.grid_anchors( - featmap_sizes, device=device) - squares_list = [multi_level_squares for _ in range(num_imgs)] - - return squares_list - - def get_target(self, - approx_list, - inside_flag_list, - square_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=None, - sampling=True, - unmap_outputs=True): - """Compute bucketing targets. - Args: - approx_list (list[list]): Multi level approxs of each image. - inside_flag_list (list[list]): Multi level inside flags of each - image. - square_list (list[list]): Multi level squares of each image. - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. - gt_bboxes_list (list[Tensor]): Gt bboxes of each image. - label_channels (int): Channel of label. - sampling (bool): Sample Anchors or not. - unmap_outputs (bool): unmap outputs or not. - - Returns: - tuple: Returns a tuple containing learning targets. - - - labels_list (list[Tensor]): Labels of each level. - - label_weights_list (list[Tensor]): Label weights of each \ - level. - - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ - each level. - - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ - each level. - - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ - each level. - - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ - each level. - - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. 
- """ - num_imgs = len(img_metas) - assert len(approx_list) == len(inside_flag_list) == len( - square_list) == num_imgs - # anchor number of multi levels - num_level_squares = [squares.size(0) for squares in square_list[0]] - # concat all level anchors and flags to a single tensor - inside_flag_flat_list = [] - approx_flat_list = [] - square_flat_list = [] - for i in range(num_imgs): - assert len(square_list[i]) == len(inside_flag_list[i]) - inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) - approx_flat_list.append(torch.cat(approx_list[i])) - square_flat_list.append(torch.cat(square_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_labels, all_label_weights, all_bbox_cls_targets, - all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, - pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, - approx_flat_list, - inside_flag_flat_list, - square_flat_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - sampling=sampling, - unmap_outputs=unmap_outputs) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - labels_list = images_to_levels(all_labels, num_level_squares) - label_weights_list = images_to_levels(all_label_weights, - num_level_squares) - bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, - num_level_squares) - bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, - num_level_squares) - bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, - num_level_squares) - bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, - num_level_squares) - return (labels_list, label_weights_list, bbox_cls_targets_list, - bbox_cls_weights_list, bbox_reg_targets_list, - bbox_reg_weights_list, num_total_pos, num_total_neg) - - def _get_target_single(self, - flat_approxs, - inside_flags, - flat_squares, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=None, - sampling=True, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - Args: - flat_approxs (Tensor): flat approxs of a single image, - shape (n, 4) - inside_flags (Tensor): inside flags of a single image, - shape (n, ). - flat_squares (Tensor): flat squares of a single image, - shape (approxs_per_octave * n, 4) - gt_bboxes (Tensor): Ground truth bboxes of a single image, \ - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - img_meta (dict): Meta info of the image. - label_channels (int): Channel of label. - sampling (bool): Sample Anchors or not. - unmap_outputs (bool): unmap outputs or not. 
- - Returns: - tuple: - - - labels_list (Tensor): Labels in a single image - - label_weights (Tensor): Label weights in a single image - - bbox_cls_targets (Tensor): BBox cls targets in a single image - - bbox_cls_weights (Tensor): BBox cls weights in a single image - - bbox_reg_targets (Tensor): BBox reg targets in a single image - - bbox_reg_weights (Tensor): BBox reg weights in a single image - - num_total_pos (int): Number of positive samples \ - in a single image - - num_total_neg (int): Number of negative samples \ - in a single image - """ - if not inside_flags.any(): - return (None, ) * 8 - # assign gt and sample anchors - expand_inside_flags = inside_flags[:, None].expand( - -1, self.approxs_per_octave).reshape(-1) - approxs = flat_approxs[expand_inside_flags, :] - squares = flat_squares[inside_flags, :] - - assign_result = self.assigner.assign(approxs, squares, - self.approxs_per_octave, - gt_bboxes, gt_bboxes_ignore) - sampling_result = self.sampler.sample(assign_result, squares, - gt_bboxes) - - num_valid_squares = squares.shape[0] - bbox_cls_targets = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_cls_weights = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_reg_targets = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_reg_weights = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - labels = squares.new_full((num_valid_squares, ), - self.num_classes, - dtype=torch.long) - label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, - pos_bbox_cls_weights) = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - - bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets - bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets - bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights - bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_squares.size(0) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, - inside_flags) - bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, - inside_flags) - bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, - inside_flags) - bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, - inside_flags) - return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, - bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds) - - def loss_single(self, cls_score, bbox_pred, labels, label_weights, - bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, - bbox_reg_weights, num_total_samples): - # classification loss - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - loss_cls = self.loss_cls( - cls_score, labels, 
label_weights, avg_factor=num_total_samples) - # regression loss - bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) - bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) - bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) - bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) - (bbox_cls_pred, bbox_reg_pred) = bbox_pred - bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( - -1, self.side_num * 4) - bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( - -1, self.side_num * 4) - loss_bbox_cls = self.loss_bbox_cls( - bbox_cls_pred, - bbox_cls_targets.long(), - bbox_cls_weights, - avg_factor=num_total_samples * 4 * self.side_num) - loss_bbox_reg = self.loss_bbox_reg( - bbox_reg_pred, - bbox_reg_targets, - bbox_reg_weights, - avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk) - return loss_cls, loss_bbox_cls, loss_bbox_reg - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.approx_anchor_generator.num_levels - - device = cls_scores[0].device - - # get sampled approxes - approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( - self, featmap_sizes, img_metas, device=device) - - square_list = self.get_anchors(featmap_sizes, img_metas, device=device) - - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = self.get_target( - approxs_list, - inside_flag_list, - square_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - sampling=self.sampling) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_cls_targets_list, - bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - labels_list, - label_weights_list, - bbox_cls_targets_list, - bbox_cls_weights_list, - bbox_reg_targets_list, - bbox_reg_weights_list, - num_total_samples=num_total_samples) - return dict( - loss_cls=losses_cls, - loss_bbox_cls=losses_bbox_cls, - loss_bbox_reg=losses_bbox_reg) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - img_metas, - cfg=None, - rescale=False): - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - - device = cls_scores[0].device - mlvl_anchors = self.get_anchors( - featmap_sizes, img_metas, device=device) - result_list = [] - for img_id in range(len(img_metas)): - cls_score_list = [ - cls_scores[i][img_id].detach() for i in range(num_levels) - ] - bbox_cls_pred_list = [ - bbox_preds[i][0][img_id].detach() for i in range(num_levels) - ] - bbox_reg_pred_list = [ - bbox_preds[i][1][img_id].detach() for i in range(num_levels) - ] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self.get_bboxes_single(cls_score_list, - bbox_cls_pred_list, - bbox_reg_pred_list, - mlvl_anchors[img_id], img_shape, - scale_factor, cfg, rescale) - result_list.append(proposals) - return result_list - - 
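# Hedged aside, not part of the deleted file: the bucket bookkeeping this head
# uses, restated as a tiny standalone check. With the default
# BucketingBBoxCoder num_buckets=14, each box side is covered by
# ceil(14 / 2) = 7 buckets, so retina_bbox_cls and retina_bbox_reg above each
# output side_num * 4 channels per anchor location.
import numpy as _np

_num_buckets = 14
_side_num = int(_np.ceil(_num_buckets / 2))  # 7, mirrors self.side_num above
print(_side_num, _side_num * 4)              # 7 28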
def get_bboxes_single(self, - cls_scores, - bbox_cls_preds, - bbox_reg_preds, - mlvl_anchors, - img_shape, - scale_factor, - cfg, - rescale=False): - cfg = self.test_cfg if cfg is None else cfg - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_confids = [] - assert len(cls_scores) == len(bbox_cls_preds) == len( - bbox_reg_preds) == len(mlvl_anchors) - for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip( - cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): - assert cls_score.size()[-2:] == bbox_cls_pred.size( - )[-2:] == bbox_reg_pred.size()[-2::] - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1) - bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( - -1, self.side_num * 4) - bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( - -1, self.side_num * 4) - nms_pre = cfg.get('nms_pre', -1) - if nms_pre > 0 and scores.shape[0] > nms_pre: - if self.use_sigmoid_cls: - max_scores, _ = scores.max(dim=1) - else: - max_scores, _ = scores[:, :-1].max(dim=1) - _, topk_inds = max_scores.topk(nms_pre) - anchors = anchors[topk_inds, :] - bbox_cls_pred = bbox_cls_pred[topk_inds, :] - bbox_reg_pred = bbox_reg_pred[topk_inds, :] - scores = scores[topk_inds, :] - bbox_preds = [ - bbox_cls_pred.contiguous(), - bbox_reg_pred.contiguous() - ] - bboxes, confids = self.bbox_coder.decode( - anchors.contiguous(), bbox_preds, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_confids.append(confids) - mlvl_bboxes = torch.cat(mlvl_bboxes) - if rescale: - mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) - mlvl_scores = torch.cat(mlvl_scores) - mlvl_confids = torch.cat(mlvl_confids) - if self.use_sigmoid_cls: - padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) - mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) - det_bboxes, det_labels = multiclass_nms( - mlvl_bboxes, - mlvl_scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=mlvl_confids) - return det_bboxes, det_labels diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 8fec6ba255f33d48a66a831de4571346a7a2bd2e..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './encnet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 2e808d8072f34d09a7b0859f90261dd66c8815dd..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', - '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_20k.py' -] -model = dict( - decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21)) diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/logger.py 
b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/logger.py deleted file mode 100644 index b1d856dcfea6b56a2ee8d37b286887430dbfac30..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/logger.py +++ /dev/null @@ -1,495 +0,0 @@ -""" -Logger copied from OpenAI baselines to avoid extra RL-based dependencies: -https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py -""" - -import os -import sys -import shutil -import os.path as osp -import json -import time -import datetime -import tempfile -import warnings -from collections import defaultdict -from contextlib import contextmanager - -DEBUG = 10 -INFO = 20 -WARN = 30 -ERROR = 40 - -DISABLED = 50 - - -class KVWriter(object): - def writekvs(self, kvs): - raise NotImplementedError - - -class SeqWriter(object): - def writeseq(self, seq): - raise NotImplementedError - - -class HumanOutputFormat(KVWriter, SeqWriter): - def __init__(self, filename_or_file): - if isinstance(filename_or_file, str): - self.file = open(filename_or_file, "wt") - self.own_file = True - else: - assert hasattr(filename_or_file, "read"), ( - "expected file or str, got %s" % filename_or_file - ) - self.file = filename_or_file - self.own_file = False - - def writekvs(self, kvs): - # Create strings for printing - key2str = {} - for (key, val) in sorted(kvs.items()): - if hasattr(val, "__float__"): - valstr = "%-8.3g" % val - else: - valstr = str(val) - key2str[self._truncate(key)] = self._truncate(valstr) - - # Find max widths - if len(key2str) == 0: - print("WARNING: tried to write empty key-value dict") - return - else: - keywidth = max(map(len, key2str.keys())) - valwidth = max(map(len, key2str.values())) - - # Write out the data - dashes = "-" * (keywidth + valwidth + 7) - lines = [dashes] - for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()): - lines.append( - "| %s%s | %s%s |" - % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val))) - ) - lines.append(dashes) - self.file.write("\n".join(lines) + "\n") - - # Flush the output to the file - self.file.flush() - - def _truncate(self, s): - maxlen = 30 - return s[: maxlen - 3] + "..." 
if len(s) > maxlen else s - - def writeseq(self, seq): - seq = list(seq) - for (i, elem) in enumerate(seq): - self.file.write(elem) - if i < len(seq) - 1: # add space unless this is the last one - self.file.write(" ") - self.file.write("\n") - self.file.flush() - - def close(self): - if self.own_file: - self.file.close() - - -class JSONOutputFormat(KVWriter): - def __init__(self, filename): - self.file = open(filename, "wt") - - def writekvs(self, kvs): - for k, v in sorted(kvs.items()): - if hasattr(v, "dtype"): - kvs[k] = float(v) - self.file.write(json.dumps(kvs) + "\n") - self.file.flush() - - def close(self): - self.file.close() - - -class CSVOutputFormat(KVWriter): - def __init__(self, filename): - self.file = open(filename, "w+t") - self.keys = [] - self.sep = "," - - def writekvs(self, kvs): - # Add our current row to the history - extra_keys = list(kvs.keys() - self.keys) - extra_keys.sort() - if extra_keys: - self.keys.extend(extra_keys) - self.file.seek(0) - lines = self.file.readlines() - self.file.seek(0) - for (i, k) in enumerate(self.keys): - if i > 0: - self.file.write(",") - self.file.write(k) - self.file.write("\n") - for line in lines[1:]: - self.file.write(line[:-1]) - self.file.write(self.sep * len(extra_keys)) - self.file.write("\n") - for (i, k) in enumerate(self.keys): - if i > 0: - self.file.write(",") - v = kvs.get(k) - if v is not None: - self.file.write(str(v)) - self.file.write("\n") - self.file.flush() - - def close(self): - self.file.close() - - -class TensorBoardOutputFormat(KVWriter): - """ - Dumps key/value pairs into TensorBoard's numeric format. - """ - - def __init__(self, dir): - os.makedirs(dir, exist_ok=True) - self.dir = dir - self.step = 1 - prefix = "events" - path = osp.join(osp.abspath(dir), prefix) - import tensorflow as tf - from tensorflow.python import pywrap_tensorflow - from tensorflow.core.util import event_pb2 - from tensorflow.python.util import compat - - self.tf = tf - self.event_pb2 = event_pb2 - self.pywrap_tensorflow = pywrap_tensorflow - self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path)) - - def writekvs(self, kvs): - def summary_val(k, v): - kwargs = {"tag": k, "simple_value": float(v)} - return self.tf.Summary.Value(**kwargs) - - summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()]) - event = self.event_pb2.Event(wall_time=time.time(), summary=summary) - event.step = ( - self.step - ) # is there any reason why you'd want to specify the step? 
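# Hedged aside, not part of the deleted file: how one of the KVWriter formats
# defined above consumes a row of metrics. The output path is a placeholder.
_fmt = JSONOutputFormat("/tmp/progress.json")
_fmt.writekvs({"loss": 0.25, "step": 10})  # appends one JSON line per call
_fmt.close()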
- self.writer.WriteEvent(event) - self.writer.Flush() - self.step += 1 - - def close(self): - if self.writer: - self.writer.Close() - self.writer = None - - -def make_output_format(format, ev_dir, log_suffix=""): - os.makedirs(ev_dir, exist_ok=True) - if format == "stdout": - return HumanOutputFormat(sys.stdout) - elif format == "log": - return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix)) - elif format == "json": - return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix)) - elif format == "csv": - return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix)) - elif format == "tensorboard": - return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix)) - else: - raise ValueError("Unknown format specified: %s" % (format,)) - - -# ================================================================ -# API -# ================================================================ - - -def logkv(key, val): - """ - Log a value of some diagnostic - Call this once for each diagnostic quantity, each iteration - If called many times, last value will be used. - """ - get_current().logkv(key, val) - - -def logkv_mean(key, val): - """ - The same as logkv(), but if called many times, values averaged. - """ - get_current().logkv_mean(key, val) - - -def logkvs(d): - """ - Log a dictionary of key-value pairs - """ - for (k, v) in d.items(): - logkv(k, v) - - -def dumpkvs(): - """ - Write all of the diagnostics from the current iteration - """ - return get_current().dumpkvs() - - -def getkvs(): - return get_current().name2val - - -def log(*args, level=INFO): - """ - Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). - """ - get_current().log(*args, level=level) - - -def debug(*args): - log(*args, level=DEBUG) - - -def info(*args): - log(*args, level=INFO) - - -def warn(*args): - log(*args, level=WARN) - - -def error(*args): - log(*args, level=ERROR) - - -def set_level(level): - """ - Set logging threshold on current logger. - """ - get_current().set_level(level) - - -def set_comm(comm): - get_current().set_comm(comm) - - -def get_dir(): - """ - Get directory that log files are being written to. - will be None if there is no output directory (i.e., if you didn't call start) - """ - return get_current().get_dir() - - -record_tabular = logkv -dump_tabular = dumpkvs - - -@contextmanager -def profile_kv(scopename): - logkey = "wait_" + scopename - tstart = time.time() - try: - yield - finally: - get_current().name2val[logkey] += time.time() - tstart - - -def profile(n): - """ - Usage: - @profile("my_func") - def my_func(): code - """ - - def decorator_with_name(func): - def func_wrapper(*args, **kwargs): - with profile_kv(n): - return func(*args, **kwargs) - - return func_wrapper - - return decorator_with_name - - -# ================================================================ -# Backend -# ================================================================ - - -def get_current(): - if Logger.CURRENT is None: - _configure_default_logger() - - return Logger.CURRENT - - -class Logger(object): - DEFAULT = None # A logger with no output files. 
(See right below class definition) - # So that you can still log to the terminal without setting up any output files - CURRENT = None # Current logger being used by the free functions above - - def __init__(self, dir, output_formats, comm=None): - self.name2val = defaultdict(float) # values this iteration - self.name2cnt = defaultdict(int) - self.level = INFO - self.dir = dir - self.output_formats = output_formats - self.comm = comm - - # Logging API, forwarded - # ---------------------------------------- - def logkv(self, key, val): - self.name2val[key] = val - - def logkv_mean(self, key, val): - oldval, cnt = self.name2val[key], self.name2cnt[key] - self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1) - self.name2cnt[key] = cnt + 1 - - def dumpkvs(self): - if self.comm is None: - d = self.name2val - else: - d = mpi_weighted_mean( - self.comm, - { - name: (val, self.name2cnt.get(name, 1)) - for (name, val) in self.name2val.items() - }, - ) - if self.comm.rank != 0: - d["dummy"] = 1 # so we don't get a warning about empty dict - out = d.copy() # Return the dict for unit testing purposes - for fmt in self.output_formats: - if isinstance(fmt, KVWriter): - fmt.writekvs(d) - self.name2val.clear() - self.name2cnt.clear() - return out - - def log(self, *args, level=INFO): - if self.level <= level: - self._do_log(args) - - # Configuration - # ---------------------------------------- - def set_level(self, level): - self.level = level - - def set_comm(self, comm): - self.comm = comm - - def get_dir(self): - return self.dir - - def close(self): - for fmt in self.output_formats: - fmt.close() - - # Misc - # ---------------------------------------- - def _do_log(self, args): - for fmt in self.output_formats: - if isinstance(fmt, SeqWriter): - fmt.writeseq(map(str, args)) - - -def get_rank_without_mpi_import(): - # check environment variables here instead of importing mpi4py - # to avoid calling MPI_Init() when this module is imported - for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]: - if varname in os.environ: - return int(os.environ[varname]) - return 0 - - -def mpi_weighted_mean(comm, local_name2valcount): - """ - Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110 - Perform a weighted average over dicts that are each on a different node - Input: local_name2valcount: dict mapping key -> (value, count) - Returns: key -> mean - """ - all_name2valcount = comm.gather(local_name2valcount) - if comm.rank == 0: - name2sum = defaultdict(float) - name2count = defaultdict(float) - for n2vc in all_name2valcount: - for (name, (val, count)) in n2vc.items(): - try: - val = float(val) - except ValueError: - if comm.rank == 0: - warnings.warn( - "WARNING: tried to compute mean on non-float {}={}".format( - name, val - ) - ) - else: - name2sum[name] += val * count - name2count[name] += count - return {name: name2sum[name] / name2count[name] for name in name2sum} - else: - return {} - - -def configure(dir=None, format_strs=None, comm=None, log_suffix=""): - """ - If comm is provided, average all numerical stats across that comm - """ - if dir is None: - dir = os.getenv("OPENAI_LOGDIR") - if dir is None: - dir = osp.join( - tempfile.gettempdir(), - datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"), - ) - assert isinstance(dir, str) - dir = os.path.expanduser(dir) - os.makedirs(os.path.expanduser(dir), exist_ok=True) - - rank = get_rank_without_mpi_import() - if rank > 0: - log_suffix = log_suffix + "-rank%03i" % 
rank - - if format_strs is None: - if rank == 0: - format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",") - else: - format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",") - format_strs = filter(None, format_strs) - output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs] - - Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm) - if output_formats: - log("Logging to %s" % dir) - - -def _configure_default_logger(): - configure() - Logger.DEFAULT = Logger.CURRENT - - -def reset(): - if Logger.CURRENT is not Logger.DEFAULT: - Logger.CURRENT.close() - Logger.CURRENT = Logger.DEFAULT - log("Reset logger") - - -@contextmanager -def scoped_configure(dir=None, format_strs=None, comm=None): - prevlogger = Logger.CURRENT - configure(dir=dir, format_strs=format_strs, comm=comm) - try: - yield - finally: - Logger.CURRENT.close() - Logger.CURRENT = prevlogger - diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/geometric.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/geometric.py deleted file mode 100644 index cf97c201cb4e43796c911919d03fb26a07ed817d..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/geometric.py +++ /dev/null @@ -1,728 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numbers - -import cv2 -import numpy as np - -from ..utils import to_2tuple -from .io import imread_backend - -try: - from PIL import Image -except ImportError: - Image = None - - -def _scale_size(size, scale): - """Rescale a size by a ratio. - - Args: - size (tuple[int]): (w, h). - scale (float | tuple(float)): Scaling factor. - - Returns: - tuple[int]: scaled size. - """ - if isinstance(scale, (float, int)): - scale = (scale, scale) - w, h = size - return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5) - - -cv2_interp_codes = { - 'nearest': cv2.INTER_NEAREST, - 'bilinear': cv2.INTER_LINEAR, - 'bicubic': cv2.INTER_CUBIC, - 'area': cv2.INTER_AREA, - 'lanczos': cv2.INTER_LANCZOS4 -} - -if Image is not None: - pillow_interp_codes = { - 'nearest': Image.NEAREST, - 'bilinear': Image.BILINEAR, - 'bicubic': Image.BICUBIC, - 'box': Image.BOX, - 'lanczos': Image.LANCZOS, - 'hamming': Image.HAMMING - } - - -def imresize(img, - size, - return_scale=False, - interpolation='bilinear', - out=None, - backend=None): - """Resize image to a given size. - - Args: - img (ndarray): The input image. - size (tuple[int]): Target size (w, h). - return_scale (bool): Whether to return `w_scale` and `h_scale`. - interpolation (str): Interpolation method, accepted values are - "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' - backend, "nearest", "bilinear" for 'pillow' backend. - out (ndarray): The output destination. - backend (str | None): The image resize backend type. Options are `cv2`, - `pillow`, `None`. If backend is None, the global imread_backend - specified by ``mmcv.use_backend()`` will be used. Default: None. - - Returns: - tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or - `resized_img`. - """ - h, w = img.shape[:2] - if backend is None: - backend = imread_backend - if backend not in ['cv2', 'pillow']: - raise ValueError(f'backend: {backend} is not supported for resize.' 
- f"Supported backends are 'cv2', 'pillow'") - - if backend == 'pillow': - assert img.dtype == np.uint8, 'Pillow backend only support uint8 type' - pil_image = Image.fromarray(img) - pil_image = pil_image.resize(size, pillow_interp_codes[interpolation]) - resized_img = np.array(pil_image) - else: - resized_img = cv2.resize( - img, size, dst=out, interpolation=cv2_interp_codes[interpolation]) - if not return_scale: - return resized_img - else: - w_scale = size[0] / w - h_scale = size[1] / h - return resized_img, w_scale, h_scale - - -def imresize_to_multiple(img, - divisor, - size=None, - scale_factor=None, - keep_ratio=False, - return_scale=False, - interpolation='bilinear', - out=None, - backend=None): - """Resize image according to a given size or scale factor and then rounds - up the the resized or rescaled image size to the nearest value that can be - divided by the divisor. - - Args: - img (ndarray): The input image. - divisor (int | tuple): Resized image size will be a multiple of - divisor. If divisor is a tuple, divisor should be - (w_divisor, h_divisor). - size (None | int | tuple[int]): Target size (w, h). Default: None. - scale_factor (None | float | tuple[float]): Multiplier for spatial - size. Should match input size if it is a tuple and the 2D style is - (w_scale_factor, h_scale_factor). Default: None. - keep_ratio (bool): Whether to keep the aspect ratio when resizing the - image. Default: False. - return_scale (bool): Whether to return `w_scale` and `h_scale`. - interpolation (str): Interpolation method, accepted values are - "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' - backend, "nearest", "bilinear" for 'pillow' backend. - out (ndarray): The output destination. - backend (str | None): The image resize backend type. Options are `cv2`, - `pillow`, `None`. If backend is None, the global imread_backend - specified by ``mmcv.use_backend()`` will be used. Default: None. - - Returns: - tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or - `resized_img`. - """ - h, w = img.shape[:2] - if size is not None and scale_factor is not None: - raise ValueError('only one of size or scale_factor should be defined') - elif size is None and scale_factor is None: - raise ValueError('one of size or scale_factor should be defined') - elif size is not None: - size = to_2tuple(size) - if keep_ratio: - size = rescale_size((w, h), size, return_scale=False) - else: - size = _scale_size((w, h), scale_factor) - - divisor = to_2tuple(divisor) - size = tuple([int(np.ceil(s / d)) * d for s, d in zip(size, divisor)]) - resized_img, w_scale, h_scale = imresize( - img, - size, - return_scale=True, - interpolation=interpolation, - out=out, - backend=backend) - if return_scale: - return resized_img, w_scale, h_scale - else: - return resized_img - - -def imresize_like(img, - dst_img, - return_scale=False, - interpolation='bilinear', - backend=None): - """Resize image to the same size of a given image. - - Args: - img (ndarray): The input image. - dst_img (ndarray): The target image. - return_scale (bool): Whether to return `w_scale` and `h_scale`. - interpolation (str): Same as :func:`resize`. - backend (str | None): Same as :func:`resize`. - - Returns: - tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or - `resized_img`. - """ - h, w = dst_img.shape[:2] - return imresize(img, (w, h), return_scale, interpolation, backend=backend) - - -def rescale_size(old_size, scale, return_scale=False): - """Calculate the new size to be rescaled to. 
- - Args: - old_size (tuple[int]): The old size (w, h) of image. - scale (float | tuple[int]): The scaling factor or maximum size. - If it is a float number, then the image will be rescaled by this - factor, else if it is a tuple of 2 integers, then the image will - be rescaled as large as possible within the scale. - return_scale (bool): Whether to return the scaling factor besides the - rescaled image size. - - Returns: - tuple[int]: The new rescaled image size. - """ - w, h = old_size - if isinstance(scale, (float, int)): - if scale <= 0: - raise ValueError(f'Invalid scale {scale}, must be positive.') - scale_factor = scale - elif isinstance(scale, tuple): - max_long_edge = max(scale) - max_short_edge = min(scale) - scale_factor = min(max_long_edge / max(h, w), - max_short_edge / min(h, w)) - else: - raise TypeError( - f'Scale must be a number or tuple of int, but got {type(scale)}') - - new_size = _scale_size((w, h), scale_factor) - - if return_scale: - return new_size, scale_factor - else: - return new_size - - -def imrescale(img, - scale, - return_scale=False, - interpolation='bilinear', - backend=None): - """Resize image while keeping the aspect ratio. - - Args: - img (ndarray): The input image. - scale (float | tuple[int]): The scaling factor or maximum size. - If it is a float number, then the image will be rescaled by this - factor, else if it is a tuple of 2 integers, then the image will - be rescaled as large as possible within the scale. - return_scale (bool): Whether to return the scaling factor besides the - rescaled image. - interpolation (str): Same as :func:`resize`. - backend (str | None): Same as :func:`resize`. - - Returns: - ndarray: The rescaled image. - """ - h, w = img.shape[:2] - new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) - rescaled_img = imresize( - img, new_size, interpolation=interpolation, backend=backend) - if return_scale: - return rescaled_img, scale_factor - else: - return rescaled_img - - -def imflip(img, direction='horizontal'): - """Flip an image horizontally or vertically. - - Args: - img (ndarray): Image to be flipped. - direction (str): The flip direction, either "horizontal" or - "vertical" or "diagonal". - - Returns: - ndarray: The flipped image. - """ - assert direction in ['horizontal', 'vertical', 'diagonal'] - if direction == 'horizontal': - return np.flip(img, axis=1) - elif direction == 'vertical': - return np.flip(img, axis=0) - else: - return np.flip(img, axis=(0, 1)) - - -def imflip_(img, direction='horizontal'): - """Inplace flip an image horizontally or vertically. - - Args: - img (ndarray): Image to be flipped. - direction (str): The flip direction, either "horizontal" or - "vertical" or "diagonal". - - Returns: - ndarray: The flipped image (inplace). - """ - assert direction in ['horizontal', 'vertical', 'diagonal'] - if direction == 'horizontal': - return cv2.flip(img, 1, img) - elif direction == 'vertical': - return cv2.flip(img, 0, img) - else: - return cv2.flip(img, -1, img) - - -def imrotate(img, - angle, - center=None, - scale=1.0, - border_value=0, - interpolation='bilinear', - auto_bound=False): - """Rotate an image. - - Args: - img (ndarray): Image to be rotated. - angle (float): Rotation angle in degrees, positive values mean - clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the rotation in - the source image. If not specified, the center of the image will be - used. - scale (float): Isotropic scale factor. - border_value (int): Border value. 
- interpolation (str): Same as :func:`resize`. - auto_bound (bool): Whether to adjust the image size to cover the whole - rotated image. - - Returns: - ndarray: The rotated image. - """ - if center is not None and auto_bound: - raise ValueError('`auto_bound` conflicts with `center`') - h, w = img.shape[:2] - if center is None: - center = ((w - 1) * 0.5, (h - 1) * 0.5) - assert isinstance(center, tuple) - - matrix = cv2.getRotationMatrix2D(center, -angle, scale) - if auto_bound: - cos = np.abs(matrix[0, 0]) - sin = np.abs(matrix[0, 1]) - new_w = h * sin + w * cos - new_h = h * cos + w * sin - matrix[0, 2] += (new_w - w) * 0.5 - matrix[1, 2] += (new_h - h) * 0.5 - w = int(np.round(new_w)) - h = int(np.round(new_h)) - rotated = cv2.warpAffine( - img, - matrix, (w, h), - flags=cv2_interp_codes[interpolation], - borderValue=border_value) - return rotated - - -def bbox_clip(bboxes, img_shape): - """Clip bboxes to fit the image shape. - - Args: - bboxes (ndarray): Shape (..., 4*k) - img_shape (tuple[int]): (height, width) of the image. - - Returns: - ndarray: Clipped bboxes. - """ - assert bboxes.shape[-1] % 4 == 0 - cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype) - cmin[0::2] = img_shape[1] - 1 - cmin[1::2] = img_shape[0] - 1 - clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0) - return clipped_bboxes - - -def bbox_scaling(bboxes, scale, clip_shape=None): - """Scaling bboxes w.r.t the box center. - - Args: - bboxes (ndarray): Shape(..., 4). - scale (float): Scaling factor. - clip_shape (tuple[int], optional): If specified, bboxes that exceed the - boundary will be clipped according to the given shape (h, w). - - Returns: - ndarray: Scaled bboxes. - """ - if float(scale) == 1.0: - scaled_bboxes = bboxes.copy() - else: - w = bboxes[..., 2] - bboxes[..., 0] + 1 - h = bboxes[..., 3] - bboxes[..., 1] + 1 - dw = (w * (scale - 1)) * 0.5 - dh = (h * (scale - 1)) * 0.5 - scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1) - if clip_shape is not None: - return bbox_clip(scaled_bboxes, clip_shape) - else: - return scaled_bboxes - - -def imcrop(img, bboxes, scale=1.0, pad_fill=None): - """Crop image patches. - - 3 steps: scale the bboxes -> clip bboxes -> crop and pad. - - Args: - img (ndarray): Image to be cropped. - bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. - scale (float, optional): Scale ratio of bboxes, the default value - 1.0 means no padding. - pad_fill (Number | list[Number]): Value to be filled for padding. - Default: None, which means no padding. - - Returns: - list[ndarray] | ndarray: The cropped image patches. - """ - chn = 1 if img.ndim == 2 else img.shape[2] - if pad_fill is not None: - if isinstance(pad_fill, (int, float)): - pad_fill = [pad_fill for _ in range(chn)] - assert len(pad_fill) == chn - - _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes - scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) - clipped_bbox = bbox_clip(scaled_bboxes, img.shape) - - patches = [] - for i in range(clipped_bbox.shape[0]): - x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) - if pad_fill is None: - patch = img[y1:y2 + 1, x1:x2 + 1, ...] 
- else: - _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) - if chn == 1: - patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) - else: - patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) - patch = np.array( - pad_fill, dtype=img.dtype) * np.ones( - patch_shape, dtype=img.dtype) - x_start = 0 if _x1 >= 0 else -_x1 - y_start = 0 if _y1 >= 0 else -_y1 - w = x2 - x1 + 1 - h = y2 - y1 + 1 - patch[y_start:y_start + h, x_start:x_start + w, - ...] = img[y1:y1 + h, x1:x1 + w, ...] - patches.append(patch) - - if bboxes.ndim == 1: - return patches[0] - else: - return patches - - -def impad(img, - *, - shape=None, - padding=None, - pad_val=0, - padding_mode='constant'): - """Pad the given image to a certain shape or pad on all sides with - specified padding mode and padding value. - - Args: - img (ndarray): Image to be padded. - shape (tuple[int]): Expected padding shape (h, w). Default: None. - padding (int or tuple[int]): Padding on each border. If a single int is - provided this is used to pad all borders. If tuple of length 2 is - provided this is the padding on left/right and top/bottom - respectively. If a tuple of length 4 is provided this is the - padding for the left, top, right and bottom borders respectively. - Default: None. Note that `shape` and `padding` can not be both - set. - pad_val (Number | Sequence[Number]): Values to be filled in padding - areas when padding_mode is 'constant'. Default: 0. - padding_mode (str): Type of padding. Should be: constant, edge, - reflect or symmetric. Default: constant. - - - constant: pads with a constant value, this value is specified - with pad_val. - - edge: pads with the last value at the edge of the image. - - reflect: pads with reflection of image without repeating the - last value on the edge. For example, padding [1, 2, 3, 4] - with 2 elements on both sides in reflect mode will result - in [3, 2, 1, 2, 3, 4, 3, 2]. - - symmetric: pads with reflection of image repeating the last - value on the edge. For example, padding [1, 2, 3, 4] with - 2 elements on both sides in symmetric mode will result in - [2, 1, 1, 2, 3, 4, 4, 3] - - Returns: - ndarray: The padded image. - """ - - assert (shape is not None) ^ (padding is not None) - if shape is not None: - padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0]) - - # check pad_val - if isinstance(pad_val, tuple): - assert len(pad_val) == img.shape[-1] - elif not isinstance(pad_val, numbers.Number): - raise TypeError('pad_val must be a int or a tuple. ' - f'But received {type(pad_val)}') - - # check padding - if isinstance(padding, tuple) and len(padding) in [2, 4]: - if len(padding) == 2: - padding = (padding[0], padding[1], padding[0], padding[1]) - elif isinstance(padding, numbers.Number): - padding = (padding, padding, padding, padding) - else: - raise ValueError('Padding must be a int or a 2, or 4 element tuple.' - f'But received {padding}') - - # check padding mode - assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] - - border_type = { - 'constant': cv2.BORDER_CONSTANT, - 'edge': cv2.BORDER_REPLICATE, - 'reflect': cv2.BORDER_REFLECT_101, - 'symmetric': cv2.BORDER_REFLECT - } - img = cv2.copyMakeBorder( - img, - padding[1], - padding[3], - padding[0], - padding[2], - border_type[padding_mode], - value=pad_val) - - return img - - -def impad_to_multiple(img, divisor, pad_val=0): - """Pad an image to ensure each edge to be multiple to some number. - - Args: - img (ndarray): Image to be padded. - divisor (int): Padded image edges will be multiple to divisor. 
- pad_val (Number | Sequence[Number]): Same as :func:`impad`. - - Returns: - ndarray: The padded image. - """ - pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor - pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor - return impad(img, shape=(pad_h, pad_w), pad_val=pad_val) - - -def cutout(img, shape, pad_val=0): - """Randomly cut out a rectangle from the original img. - - Args: - img (ndarray): Image to be cutout. - shape (int | tuple[int]): Expected cutout shape (h, w). If given as a - int, the value will be used for both h and w. - pad_val (int | float | tuple[int | float]): Values to be filled in the - cut area. Defaults to 0. - - Returns: - ndarray: The cutout image. - """ - - channels = 1 if img.ndim == 2 else img.shape[2] - if isinstance(shape, int): - cut_h, cut_w = shape, shape - else: - assert isinstance(shape, tuple) and len(shape) == 2, \ - f'shape must be a int or a tuple with length 2, but got type ' \ - f'{type(shape)} instead.' - cut_h, cut_w = shape - if isinstance(pad_val, (int, float)): - pad_val = tuple([pad_val] * channels) - elif isinstance(pad_val, tuple): - assert len(pad_val) == channels, \ - 'Expected the num of elements in tuple equals the channels' \ - 'of input image. Found {} vs {}'.format( - len(pad_val), channels) - else: - raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`') - - img_h, img_w = img.shape[:2] - y0 = np.random.uniform(img_h) - x0 = np.random.uniform(img_w) - - y1 = int(max(0, y0 - cut_h / 2.)) - x1 = int(max(0, x0 - cut_w / 2.)) - y2 = min(img_h, y1 + cut_h) - x2 = min(img_w, x1 + cut_w) - - if img.ndim == 2: - patch_shape = (y2 - y1, x2 - x1) - else: - patch_shape = (y2 - y1, x2 - x1, channels) - - img_cutout = img.copy() - patch = np.array( - pad_val, dtype=img.dtype) * np.ones( - patch_shape, dtype=img.dtype) - img_cutout[y1:y2, x1:x2, ...] = patch - - return img_cutout - - -def _get_shear_matrix(magnitude, direction='horizontal'): - """Generate the shear matrix for transformation. - - Args: - magnitude (int | float): The magnitude used for shear. - direction (str): The flip direction, either "horizontal" - or "vertical". - - Returns: - ndarray: The shear matrix with dtype float32. - """ - if direction == 'horizontal': - shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]]) - elif direction == 'vertical': - shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]]) - return shear_matrix - - -def imshear(img, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear an image. - - Args: - img (ndarray): Image to be sheared with format (h, w) - or (h, w, c). - magnitude (int | float): The magnitude used for shear. - direction (str): The flip direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. - interpolation (str): Same as :func:`resize`. - - Returns: - ndarray: The sheared image. - """ - assert direction in ['horizontal', - 'vertical'], f'Invalid direction: {direction}' - height, width = img.shape[:2] - if img.ndim == 2: - channels = 1 - elif img.ndim == 3: - channels = img.shape[-1] - if isinstance(border_value, int): - border_value = tuple([border_value] * channels) - elif isinstance(border_value, tuple): - assert len(border_value) == channels, \ - 'Expected the num of elements in tuple equals the channels' \ - 'of input image. 
Found {} vs {}'.format( - len(border_value), channels) - else: - raise ValueError( - f'Invalid type {type(border_value)} for `border_value`') - shear_matrix = _get_shear_matrix(magnitude, direction) - sheared = cv2.warpAffine( - img, - shear_matrix, - (width, height), - # Note case when the number elements in `border_value` - # greater than 3 (e.g. shearing masks whose channels large - # than 3) will raise TypeError in `cv2.warpAffine`. - # Here simply slice the first 3 values in `border_value`. - borderValue=border_value[:3], - flags=cv2_interp_codes[interpolation]) - return sheared - - -def _get_translate_matrix(offset, direction='horizontal'): - """Generate the translate matrix. - - Args: - offset (int | float): The offset used for translate. - direction (str): The translate direction, either - "horizontal" or "vertical". - - Returns: - ndarray: The translate matrix with dtype float32. - """ - if direction == 'horizontal': - translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]]) - elif direction == 'vertical': - translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]]) - return translate_matrix - - -def imtranslate(img, - offset, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Translate an image. - - Args: - img (ndarray): Image to be translated with format - (h, w) or (h, w, c). - offset (int | float): The offset used for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. - interpolation (str): Same as :func:`resize`. - - Returns: - ndarray: The translated image. - """ - assert direction in ['horizontal', - 'vertical'], f'Invalid direction: {direction}' - height, width = img.shape[:2] - if img.ndim == 2: - channels = 1 - elif img.ndim == 3: - channels = img.shape[-1] - if isinstance(border_value, int): - border_value = tuple([border_value] * channels) - elif isinstance(border_value, tuple): - assert len(border_value) == channels, \ - 'Expected the num of elements in tuple equals the channels' \ - 'of input image. Found {} vs {}'.format( - len(border_value), channels) - else: - raise ValueError( - f'Invalid type {type(border_value)} for `border_value`.') - translate_matrix = _get_translate_matrix(offset, direction) - translated = cv2.warpAffine( - img, - translate_matrix, - (width, height), - # Note case when the number elements in `border_value` - # greater than 3 (e.g. translating masks whose channels - # large than 3) will raise TypeError in `cv2.warpAffine`. - # Here simply slice the first 3 values in `border_value`. - borderValue=border_value[:3], - flags=cv2_interp_codes[interpolation]) - return translated diff --git a/spaces/Apex-X/Tm/roop/metadata.py b/spaces/Apex-X/Tm/roop/metadata.py deleted file mode 100644 index 35b0f0245a38eb9ec024f2ed2c829044f6051c29..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/Tm/roop/metadata.py +++ /dev/null @@ -1,2 +0,0 @@ -name = 'roop' -version = '1.1.0' diff --git a/spaces/Arnx/MusicGenXvAKN/tests/modules/test_conv.py b/spaces/Arnx/MusicGenXvAKN/tests/modules/test_conv.py deleted file mode 100644 index 28fbc4f1a0ebaf41b56947b767958ae696e75eec..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/tests/modules/test_conv.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from itertools import product -import math -import random - -import pytest -import torch -from torch import nn - -from audiocraft.modules import ( - NormConv1d, - NormConvTranspose1d, - StreamableConv1d, - StreamableConvTranspose1d, - pad1d, - unpad1d, -) - - -def test_get_extra_padding_for_conv1d(): - # TODO: Implement me! - pass - - -def test_pad1d_zeros(): - x = torch.randn(1, 1, 20) - - xp1 = pad1d(x, (0, 5), mode='constant', value=0.) - assert xp1.shape[-1] == 25 - xp2 = pad1d(x, (5, 5), mode='constant', value=0.) - assert xp2.shape[-1] == 30 - xp3 = pad1d(x, (0, 0), mode='constant', value=0.) - assert xp3.shape[-1] == 20 - xp4 = pad1d(x, (10, 30), mode='constant', value=0.) - assert xp4.shape[-1] == 60 - - with pytest.raises(AssertionError): - pad1d(x, (-1, 0), mode='constant', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (0, -1), mode='constant', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (-1, -1), mode='constant', value=0.) - - -def test_pad1d_reflect(): - x = torch.randn(1, 1, 20) - - xp1 = pad1d(x, (0, 5), mode='reflect', value=0.) - assert xp1.shape[-1] == 25 - xp2 = pad1d(x, (5, 5), mode='reflect', value=0.) - assert xp2.shape[-1] == 30 - xp3 = pad1d(x, (0, 0), mode='reflect', value=0.) - assert xp3.shape[-1] == 20 - xp4 = pad1d(x, (10, 30), mode='reflect', value=0.) - assert xp4.shape[-1] == 60 - - with pytest.raises(AssertionError): - pad1d(x, (-1, 0), mode='reflect', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (0, -1), mode='reflect', value=0.) - - with pytest.raises(AssertionError): - pad1d(x, (-1, -1), mode='reflect', value=0.) - - -def test_unpad1d(): - x = torch.randn(1, 1, 20) - - u1 = unpad1d(x, (5, 5)) - assert u1.shape[-1] == 10 - u2 = unpad1d(x, (0, 5)) - assert u2.shape[-1] == 15 - u3 = unpad1d(x, (5, 0)) - assert u3.shape[-1] == 15 - u4 = unpad1d(x, (0, 0)) - assert u4.shape[-1] == x.shape[-1] - - with pytest.raises(AssertionError): - unpad1d(x, (-1, 0)) - - with pytest.raises(AssertionError): - unpad1d(x, (0, -1)) - - with pytest.raises(AssertionError): - unpad1d(x, (-1, -1)) - - -class TestNormConv1d: - - def test_norm_conv1d_modules(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out, kernel_size, stride = 1, 4, 1 - expected_out_length = int((T - kernel_size) / stride + 1) - wn_conv = NormConv1d(C, 1, kernel_size=4, norm='weight_norm') - gn_conv = NormConv1d(C, 1, kernel_size=4, norm='time_group_norm') - nn_conv = NormConv1d(C, 1, kernel_size=4, norm='none') - - assert isinstance(wn_conv.norm, nn.Identity) - assert isinstance(wn_conv.conv, nn.Conv1d) - - assert isinstance(gn_conv.norm, nn.GroupNorm) - assert isinstance(gn_conv.conv, nn.Conv1d) - - assert isinstance(nn_conv.norm, nn.Identity) - assert isinstance(nn_conv.conv, nn.Conv1d) - - for conv_layer in [wn_conv, gn_conv, nn_conv]: - out = conv_layer(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] - - -class TestNormConvTranspose1d: - - def test_normalizations(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out, kernel_size, stride = 1, 4, 1 - expected_out_length = (T - 1) * stride + (kernel_size - 1) + 1 - - wn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='weight_norm') - gn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='time_group_norm') - nn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='none') - - assert 
isinstance(wn_convtr.norm, nn.Identity) - assert isinstance(wn_convtr.convtr, nn.ConvTranspose1d) - - assert isinstance(gn_convtr.norm, nn.GroupNorm) - assert isinstance(gn_convtr.convtr, nn.ConvTranspose1d) - - assert isinstance(nn_convtr.norm, nn.Identity) - assert isinstance(nn_convtr.convtr, nn.ConvTranspose1d) - - for convtr_layer in [wn_convtr, gn_convtr, nn_convtr]: - out = convtr_layer(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] - - -class TestStreamableConv1d: - - def get_streamable_conv1d_output_length(self, length, kernel_size, stride, dilation): - # StreamableConv1d internally pads to make sure that the last window is full - padding_total = (kernel_size - 1) * dilation - (stride - 1) - n_frames = (length - kernel_size + padding_total) / stride + 1 - ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) - return ideal_length // stride - - def test_streamable_conv1d(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - C_out = 1 - - # conv params are [(kernel_size, stride, dilation)] - conv_params = [(4, 1, 1), (4, 2, 1), (3, 1, 3), (10, 5, 1), (3, 2, 3)] - for causal, (kernel_size, stride, dilation) in product([False, True], conv_params): - expected_out_length = self.get_streamable_conv1d_output_length(T, kernel_size, stride, dilation) - sconv = StreamableConv1d(C, C_out, kernel_size=kernel_size, stride=stride, dilation=dilation, causal=causal) - out = sconv(t0) - assert isinstance(out, torch.Tensor) - print(list(out.shape), [N, C_out, expected_out_length]) - assert list(out.shape) == [N, C_out, expected_out_length] - - -class TestStreamableConvTranspose1d: - - def get_streamable_convtr1d_output_length(self, length, kernel_size, stride): - padding_total = (kernel_size - stride) - return (length - 1) * stride - padding_total + (kernel_size - 1) + 1 - - def test_streamable_convtr1d(self): - N, C, T = 2, 2, random.randrange(1, 100_000) - t0 = torch.randn(N, C, T) - - C_out = 1 - - with pytest.raises(AssertionError): - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=False, trim_right_ratio=0.5) - StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=-1.) 
- StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=2) - - # causal params are [(causal, trim_right)] - causal_params = [(False, 1.0), (True, 1.0), (True, 0.5), (True, 0.0)] - # conv params are [(kernel_size, stride)] - conv_params = [(4, 1), (4, 2), (3, 1), (10, 5)] - for ((causal, trim_right_ratio), (kernel_size, stride)) in product(causal_params, conv_params): - expected_out_length = self.get_streamable_convtr1d_output_length(T, kernel_size, stride) - sconvtr = StreamableConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, - causal=causal, trim_right_ratio=trim_right_ratio) - out = sconvtr(t0) - assert isinstance(out, torch.Tensor) - assert list(out.shape) == [N, C_out, expected_out_length] diff --git a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/utils/zoom_out_utils.py b/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/utils/zoom_out_utils.py deleted file mode 100644 index 7f9d0605691e5ad4a92979547b8795a8d3f24be3..0000000000000000000000000000000000000000 --- a/spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/utils/zoom_out_utils.py +++ /dev/null @@ -1,47 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image - - -def write_video(file_path, frames, fps): - """ - Writes frames to an mp4 video file - :param file_path: Path to output video, must end with .mp4 - :param frames: List of PIL.Image objects - :param fps: Desired frame rate - """ - - w, h = frames[0].size - fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v") - writer = cv2.VideoWriter(file_path, fourcc, fps, (w, h)) - - for frame in frames: - np_frame = np.array(frame.convert("RGB")) - cv_frame = cv2.cvtColor(np_frame, cv2.COLOR_RGB2BGR) - writer.write(cv_frame) - - writer.release() - - -def dummy(images, **kwargs): - return images, False - - -def preprocess_image(current_image, steps, image_size): - next_image = np.array(current_image.convert("RGBA")) * 0 - prev_image = current_image.resize((image_size - 2 * steps, image_size - 2 * steps)) - prev_image = prev_image.convert("RGBA") - prev_image = np.array(prev_image) - next_image[:, :, 3] = 1 - next_image[steps : image_size - steps, steps : image_size - steps, :] = prev_image - prev_image = Image.fromarray(next_image) - - return prev_image - - -def preprocess_mask_image(current_image): - mask_image = np.array(current_image)[:, :, 3] # assume image has alpha mask (use .mode to check for "RGBA") - mask_image = Image.fromarray(255 - mask_image).convert("RGB") - current_image = current_image.convert("RGB") - - return current_image, mask_image diff --git a/spaces/Ashrafb/codellama-34b/app.py b/spaces/Ashrafb/codellama-34b/app.py deleted file mode 100644 index db9f71f2ca9a0e1b878ad874b5e536857830551f..0000000000000000000000000000000000000000 --- a/spaces/Ashrafb/codellama-34b/app.py +++ /dev/null @@ -1,260 +0,0 @@ -import os -from typing import Iterator - -import gradio as gr - -from model import run - -HF_PUBLIC = os.environ.get("HF_PUBLIC", False) - -DEFAULT_SYSTEM_PROMPT = """\ -You are a helpful, respectful and honest assistant with a deep knowledge of code and software design. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. 
If you don't know the answer to a question, please don't share false information.\ -""" -MAX_MAX_NEW_TOKENS = 10000 -DEFAULT_MAX_NEW_TOKENS = 1024 -MAX_INPUT_TOKEN_LENGTH = 10000 - -DESCRIPTION = """ -# Code Llama 34B Chat - - - -""" - -LICENSE = """ -

- ---- -As a derivate work of Code Llama by Meta, -this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/codellama-2-34b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/codellama-2-34b-chat/blob/main/USE_POLICY.md). -""" - - -def clear_and_save_textbox(message: str) -> tuple[str, str]: - return '', message - - -def display_input(message: str, - history: list[tuple[str, str]]) -> list[tuple[str, str]]: - history.append((message, '')) - return history - - -def delete_prev_fn( - history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]: - try: - message, _ = history.pop() - except IndexError: - message = '' - return history, message or '' - - -def generate( - message: str, - history_with_input: list[tuple[str, str]], - system_prompt: str, - max_new_tokens: int, - temperature: float, - top_p: float, - top_k: int, -) -> Iterator[list[tuple[str, str]]]: - if max_new_tokens > MAX_MAX_NEW_TOKENS: - raise ValueError - - history = history_with_input[:-1] - generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k) - try: - first_response = next(generator) - yield history + [(message, first_response)] - except StopIteration: - yield history + [(message, '')] - for response in generator: - yield history + [(message, response)] - - -def process_example(message: str) -> tuple[str, list[tuple[str, str]]]: - generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95, 50) - for x in generator: - pass - return '', x - - -def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None: - input_token_length = len(message) + len(chat_history) - if input_token_length > MAX_INPUT_TOKEN_LENGTH: - raise gr.Error(f'The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). 
Clear your chat history and try again.') - - -with gr.Blocks(css=".gradio-container {background-color: #FFE4C4}") as demo: - - with gr.Group(): - chatbot = gr.Chatbot(label='Chatbot') - with gr.Row(): - textbox = gr.Textbox( - container=False, - show_label=False, - placeholder='Type a message...', - scale=10, - ) - submit_button = gr.Button('Submit', - variant='primary', - scale=1, - min_width=0) - with gr.Row(): - retry_button = gr.Button('🔄 Retry', variant='secondary') - undo_button = gr.Button('↩️ Undo', variant='secondary') - clear_button = gr.Button('🗑️ Clear', variant='secondary') - - saved_input = gr.State() - - with gr.Accordion(label='Advanced options', open=False): - system_prompt = gr.Textbox(label='System prompt', - value=DEFAULT_SYSTEM_PROMPT, - lines=6) - max_new_tokens = gr.Slider( - label='Max new tokens', - minimum=1, - maximum=MAX_MAX_NEW_TOKENS, - step=1, - value=DEFAULT_MAX_NEW_TOKENS, - ) - temperature = gr.Slider( - label='Temperature', - minimum=0.1, - maximum=4.0, - step=0.1, - value=0.1, - ) - top_p = gr.Slider( - label='Top-p (nucleus sampling)', - minimum=0.05, - maximum=1.0, - step=0.05, - value=0.9, - ) - top_k = gr.Slider( - label='Top-k', - minimum=1, - maximum=1000, - step=1, - value=10, - ) - - - - gr.Markdown(LICENSE) - - textbox.submit( - fn=clear_and_save_textbox, - inputs=textbox, - outputs=[textbox, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).then( - fn=check_input_token_length, - inputs=[saved_input, chatbot, system_prompt], - api_name=False, - queue=False, - ).success( - fn=generate, - inputs=[ - saved_input, - chatbot, - system_prompt, - max_new_tokens, - temperature, - top_p, - top_k, - ], - outputs=chatbot, - api_name=False, - ) - - button_event_preprocess = submit_button.click( - fn=clear_and_save_textbox, - inputs=textbox, - outputs=[textbox, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).then( - fn=check_input_token_length, - inputs=[saved_input, chatbot, system_prompt], - api_name=False, - queue=False, - ).success( - fn=generate, - inputs=[ - saved_input, - chatbot, - system_prompt, - max_new_tokens, - temperature, - top_p, - top_k, - ], - outputs=chatbot, - api_name=False, - ) - - retry_button.click( - fn=delete_prev_fn, - inputs=chatbot, - outputs=[chatbot, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).then( - fn=generate, - inputs=[ - saved_input, - chatbot, - system_prompt, - max_new_tokens, - temperature, - top_p, - top_k, - ], - outputs=chatbot, - api_name=False, - ) - - undo_button.click( - fn=delete_prev_fn, - inputs=chatbot, - outputs=[chatbot, saved_input], - api_name=False, - queue=False, - ).then( - fn=lambda x: x, - inputs=[saved_input], - outputs=textbox, - api_name=False, - queue=False, - ) - - clear_button.click( - fn=lambda: ([], ''), - outputs=[chatbot, saved_input], - queue=False, - api_name=False, - ) - -demo.queue(max_size=32).launch(share=HF_PUBLIC) - diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/langrussianmodel.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/langrussianmodel.py deleted file mode 100644 index 
39a5388948ef12b69b65fbfa89a84c6ef4a4bfd6..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/langrussianmodel.py +++ /dev/null @@ -1,5725 +0,0 @@ -from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - -# 3: Positive -# 2: Likely -# 1: Unlikely -# 0: Negative - -RUSSIAN_LANG_MODEL = { - 37: { # 'А' - 37: 0, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 1, # 'Ж' - 51: 1, # 'З' - 42: 1, # 'И' - 60: 1, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 2, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 1, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 1, # 'Ч' - 57: 1, # 'Ш' - 63: 1, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 1, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 0, # 'е' - 24: 1, # 'ж' - 20: 1, # 'з' - 4: 0, # 'и' - 23: 1, # 'й' - 11: 2, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 2, # 'н' - 1: 0, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 2, # 'у' - 39: 2, # 'ф' - 26: 2, # 'х' - 28: 0, # 'ц' - 22: 1, # 'ч' - 25: 2, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 44: { # 'Б' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 1, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 2, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 2, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 33: { # 'В' - 37: 2, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 1, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 2, # 'а' - 21: 1, # 'б' - 10: 1, # 'в' - 19: 1, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 2, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 2, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 3, # 'с' - 6: 2, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 1, # 'ц' - 22: 2, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 1, # 'ъ' - 18: 3, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 0, # 'ю' - 16: 1, # 'я' - }, - 46: { # 'Г' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 0, # 'С' - 40: 0, # 
'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 1, # 'в' - 19: 0, # 'г' - 13: 2, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 1, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 1, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 41: { # 'Д' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 2, # 'Е' - 56: 1, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 0, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 1, # 'Ц' - 50: 1, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 3, # 'а' - 21: 0, # 'б' - 10: 2, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 3, # 'ж' - 20: 1, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 1, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 48: { # 'Е' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 1, # 'Ж' - 51: 1, # 'З' - 42: 1, # 'И' - 60: 1, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 2, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 2, # 'Р' - 32: 2, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 1, # 'Ч' - 57: 1, # 'Ш' - 63: 1, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 0, # 'а' - 21: 0, # 'б' - 10: 2, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 2, # 'е' - 24: 1, # 'ж' - 20: 1, # 'з' - 4: 0, # 'и' - 23: 2, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 1, # 'н' - 1: 0, # 'о' - 15: 1, # 'п' - 9: 1, # 'р' - 7: 3, # 'с' - 6: 0, # 'т' - 14: 0, # 'у' - 39: 1, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 2, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 56: { # 'Ж' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 1, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 1, # 'б' - 10: 0, # 'в' - 19: 1, # 'г' - 13: 1, # 'д' - 2: 2, # 'е' - 24: 1, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 1, # 'м' - 5: 0, # 'н' - 1: 2, # 'о' - 15: 0, # 'п' - 9: 1, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 2, # 'ю' - 16: 0, # 'я' - }, - 51: { # 'З' - 37: 1, # 'А' 
- 44: 0, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 2, # 'в' - 19: 0, # 'г' - 13: 2, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 1, # 'л' - 12: 1, # 'м' - 5: 2, # 'н' - 1: 2, # 'о' - 15: 0, # 'п' - 9: 1, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 1, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 1, # 'я' - }, - 42: { # 'И' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 2, # 'Е' - 56: 1, # 'Ж' - 51: 1, # 'З' - 42: 1, # 'И' - 60: 1, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 2, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 1, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 1, # 'Ч' - 57: 0, # 'Ш' - 63: 1, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 1, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 2, # 'з' - 4: 1, # 'и' - 23: 0, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 2, # 'н' - 1: 1, # 'о' - 15: 1, # 'п' - 9: 2, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 1, # 'у' - 39: 1, # 'ф' - 26: 2, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 60: { # 'Й' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 1, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 1, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 0, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 2, # 'о' - 15: 0, # 'п' - 9: 0, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 0, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 36: { # 'К' - 37: 2, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 1, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 1, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 2, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 1, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 0, # 'б' - 10: 1, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 2, # 'л' - 12: 0, # 'м' - 5: 1, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 
2, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 49: { # 'Л' - 37: 2, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 1, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 1, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 0, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 0, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 1, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 0, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 1, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 1, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 1, # 'л' - 12: 0, # 'м' - 5: 1, # 'н' - 1: 2, # 'о' - 15: 0, # 'п' - 9: 0, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 2, # 'ю' - 16: 1, # 'я' - }, - 38: { # 'М' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 1, # 'Ф' - 55: 1, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 0, # 'Ь' - 47: 1, # 'Э' - 59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 3, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 1, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 1, # 'л' - 12: 1, # 'м' - 5: 2, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 1, # 'р' - 7: 1, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 3, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 31: { # 'Н' - 37: 2, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 1, # 'З' - 42: 2, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 1, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 1, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 1, # 'Э' - 59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 3, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 1, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 3, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 2, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 34: { # 'О' - 37: 0, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 2, # 'Д' - 48: 1, # 'Е' - 56: 1, # 'Ж' - 51: 1, # 'З' - 42: 1, # 'И' - 60: 1, # 'Й' - 36: 1, # 'К' - 49: 2, # 'Л' - 38: 1, # 'М' - 31: 2, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 2, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 1, # 'Ф' - 55: 1, # 'Х' - 58: 0, # 'Ц' - 50: 1, # 'Ч' - 57: 1, # 'Ш' - 63: 1, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 
59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 1, # 'а' - 21: 2, # 'б' - 10: 1, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 0, # 'е' - 24: 1, # 'ж' - 20: 1, # 'з' - 4: 0, # 'и' - 23: 1, # 'й' - 11: 2, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 3, # 'н' - 1: 0, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 1, # 'у' - 39: 1, # 'ф' - 26: 2, # 'х' - 28: 1, # 'ц' - 22: 2, # 'ч' - 25: 2, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 35: { # 'П' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 1, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 2, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 1, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 2, # 'л' - 12: 0, # 'м' - 5: 1, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 3, # 'р' - 7: 1, # 'с' - 6: 1, # 'т' - 14: 2, # 'у' - 39: 1, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 2, # 'ь' - 30: 1, # 'э' - 27: 0, # 'ю' - 16: 2, # 'я' - }, - 45: { # 'Р' - 37: 2, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 2, # 'Е' - 56: 1, # 'Ж' - 51: 0, # 'З' - 42: 2, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 2, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 1, # 'Ч' - 57: 1, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 1, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 3, # 'а' - 21: 0, # 'б' - 10: 1, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 1, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 1, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 2, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 2, # 'я' - }, - 32: { # 'С' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 2, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 1, # 'Ч' - 57: 1, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 1, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 2, # 'а' - 21: 1, # 'б' - 10: 2, # 'в' - 19: 1, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 1, # 'ж' - 20: 1, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 2, # 'н' - 1: 2, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 1, # 'с' - 6: 3, # 'т' - 14: 2, # 'у' - 39: 1, # 'ф' - 26: 1, # 'х' - 28: 1, # 'ц' - 22: 1, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 1, # 'ъ' - 18: 1, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 40: { # 'Т' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 
1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 2, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 1, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 1, # 'Ь' - 47: 1, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 2, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 1, # 'к' - 8: 1, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 1, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 3, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 52: { # 'У' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 1, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 1, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 1, # 'Х' - 58: 0, # 'Ц' - 50: 1, # 'Ч' - 57: 1, # 'Ш' - 63: 1, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 1, # 'Ю' - 43: 0, # 'Я' - 3: 1, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 1, # 'г' - 13: 2, # 'д' - 2: 1, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 2, # 'и' - 23: 1, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 1, # 'н' - 1: 2, # 'о' - 15: 1, # 'п' - 9: 2, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 0, # 'у' - 39: 1, # 'ф' - 26: 1, # 'х' - 28: 1, # 'ц' - 22: 2, # 'ч' - 25: 1, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 53: { # 'Ф' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 1, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 2, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 2, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 0, # 'с' - 6: 1, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 55: { # 'Х' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 2, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 0, # 'н' - 1: 2, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 1, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' 
- 18: 0, # 'ы' - 17: 1, # 'ь' - 30: 1, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 58: { # 'Ц' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 1, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 1, # 'а' - 21: 0, # 'б' - 10: 1, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 0, # 'о' - 15: 0, # 'п' - 9: 0, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 1, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 50: { # 'Ч' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 1, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 1, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 1, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 1, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 1, # 'о' - 15: 0, # 'п' - 9: 1, # 'р' - 7: 0, # 'с' - 6: 3, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 1, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 57: { # 'Ш' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 1, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 0, # 'б' - 10: 1, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 1, # 'и' - 23: 0, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 1, # 'н' - 1: 2, # 'о' - 15: 2, # 'п' - 9: 1, # 'р' - 7: 0, # 'с' - 6: 2, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 63: { # 'Щ' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 1, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 1, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 1, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 1, # 
'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 1, # 'о' - 15: 0, # 'п' - 9: 0, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 1, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 62: { # 'Ы' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 1, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 1, # 'Х' - 58: 1, # 'Ц' - 50: 0, # 'Ч' - 57: 1, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 0, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 0, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 0, # 'о' - 15: 0, # 'п' - 9: 0, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 0, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 61: { # 'Ь' - 37: 0, # 'А' - 44: 1, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 1, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 0, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 1, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 1, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 1, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 1, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 0, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 0, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 0, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 0, # 'о' - 15: 0, # 'п' - 9: 0, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 0, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 47: { # 'Э' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 0, # 'Г' - 41: 1, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 1, # 'Й' - 36: 1, # 'К' - 49: 1, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 1, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 1, # 'а' - 21: 1, # 'б' - 10: 2, # 'в' - 19: 1, # 'г' - 13: 2, # 'д' - 2: 0, # 'е' - 24: 1, # 'ж' - 20: 0, # 'з' - 4: 0, # 'и' - 23: 2, # 'й' - 11: 2, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 2, # 'н' - 1: 0, # 'о' - 15: 1, # 'п' - 9: 2, # 'р' - 7: 1, # 'с' - 6: 3, # 'т' - 14: 1, # 'у' - 39: 1, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 59: { # 'Ю' - 37: 1, # 'А' - 44: 1, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 1, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 0, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 
'Х' - 58: 0, # 'Ц' - 50: 1, # 'Ч' - 57: 0, # 'Ш' - 63: 1, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 1, # 'б' - 10: 0, # 'в' - 19: 1, # 'г' - 13: 1, # 'д' - 2: 0, # 'е' - 24: 1, # 'ж' - 20: 0, # 'з' - 4: 0, # 'и' - 23: 0, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 2, # 'н' - 1: 0, # 'о' - 15: 1, # 'п' - 9: 1, # 'р' - 7: 1, # 'с' - 6: 0, # 'т' - 14: 0, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 43: { # 'Я' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 1, # 'В' - 46: 1, # 'Г' - 41: 0, # 'Д' - 48: 1, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 1, # 'С' - 40: 1, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 1, # 'Х' - 58: 0, # 'Ц' - 50: 1, # 'Ч' - 57: 0, # 'Ш' - 63: 1, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 1, # 'Ю' - 43: 1, # 'Я' - 3: 0, # 'а' - 21: 1, # 'б' - 10: 1, # 'в' - 19: 1, # 'г' - 13: 1, # 'д' - 2: 0, # 'е' - 24: 0, # 'ж' - 20: 1, # 'з' - 4: 0, # 'и' - 23: 1, # 'й' - 11: 1, # 'к' - 8: 1, # 'л' - 12: 1, # 'м' - 5: 2, # 'н' - 1: 0, # 'о' - 15: 1, # 'п' - 9: 1, # 'р' - 7: 1, # 'с' - 6: 0, # 'т' - 14: 0, # 'у' - 39: 0, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 3: { # 'а' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 1, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 3, # 'б' - 10: 3, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 3, # 'з' - 4: 3, # 'и' - 23: 3, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 2, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 3, # 'х' - 28: 3, # 'ц' - 22: 3, # 'ч' - 25: 3, # 'ш' - 29: 3, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 2, # 'э' - 27: 3, # 'ю' - 16: 3, # 'я' - }, - 21: { # 'б' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 1, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 1, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 1, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 1, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 0, # 'ф' - 26: 2, # 'х' - 28: 1, # 'ц' - 22: 1, # 'ч' - 25: 2, # 'ш' - 29: 3, # 'щ' - 54: 2, # 'ъ' - 18: 3, # 'ы' - 17: 2, # 'ь' - 30: 1, # 'э' - 27: 2, # 'ю' - 16: 3, # 'я' - }, - 10: { # 'в' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 
41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 2, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 1, # 'ж' - 20: 3, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 1, # 'ф' - 26: 2, # 'х' - 28: 2, # 'ц' - 22: 2, # 'ч' - 25: 3, # 'ш' - 29: 2, # 'щ' - 54: 2, # 'ъ' - 18: 3, # 'ы' - 17: 3, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 3, # 'я' - }, - 19: { # 'г' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 2, # 'в' - 19: 1, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 1, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 3, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 1, # 'ф' - 26: 1, # 'х' - 28: 1, # 'ц' - 22: 2, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 1, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 13: { # 'д' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 3, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 1, # 'ф' - 26: 2, # 'х' - 28: 3, # 'ц' - 22: 2, # 'ч' - 25: 2, # 'ш' - 29: 1, # 'щ' - 54: 2, # 'ъ' - 18: 3, # 'ы' - 17: 3, # 'ь' - 30: 1, # 'э' - 27: 2, # 'ю' - 16: 3, # 'я' - }, - 2: { # 'е' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 3, # 'б' - 10: 3, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 3, # 'з' - 4: 2, # 'и' - 23: 3, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 2, # 
'у' - 39: 2, # 'ф' - 26: 3, # 'х' - 28: 3, # 'ц' - 22: 3, # 'ч' - 25: 3, # 'ш' - 29: 3, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 2, # 'ю' - 16: 3, # 'я' - }, - 24: { # 'ж' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 1, # 'в' - 19: 2, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 1, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 3, # 'н' - 1: 2, # 'о' - 15: 1, # 'п' - 9: 2, # 'р' - 7: 2, # 'с' - 6: 1, # 'т' - 14: 3, # 'у' - 39: 1, # 'ф' - 26: 0, # 'х' - 28: 1, # 'ц' - 22: 2, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 2, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 20: { # 'з' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 3, # 'б' - 10: 3, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 3, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 1, # 'ц' - 22: 2, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 2, # 'ъ' - 18: 3, # 'ы' - 17: 2, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 3, # 'я' - }, - 4: { # 'и' - 37: 1, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 3, # 'б' - 10: 3, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 3, # 'з' - 4: 3, # 'и' - 23: 3, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 2, # 'у' - 39: 2, # 'ф' - 26: 3, # 'х' - 28: 3, # 'ц' - 22: 3, # 'ч' - 25: 3, # 'ш' - 29: 3, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 2, # 'э' - 27: 3, # 'ю' - 16: 3, # 'я' - }, - 23: { # 'й' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 1, # 'а' - 21: 1, 
# 'б' - 10: 1, # 'в' - 19: 2, # 'г' - 13: 3, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 2, # 'з' - 4: 1, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 2, # 'о' - 15: 1, # 'п' - 9: 2, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 1, # 'у' - 39: 2, # 'ф' - 26: 1, # 'х' - 28: 2, # 'ц' - 22: 3, # 'ч' - 25: 2, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 2, # 'я' - }, - 11: { # 'к' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 3, # 'в' - 19: 1, # 'г' - 13: 1, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 3, # 'л' - 12: 1, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 1, # 'ф' - 26: 2, # 'х' - 28: 2, # 'ц' - 22: 1, # 'ч' - 25: 2, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 1, # 'ы' - 17: 1, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 8: { # 'л' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 3, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 2, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 1, # 'р' - 7: 3, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 2, # 'х' - 28: 1, # 'ц' - 22: 3, # 'ч' - 25: 2, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 3, # 'ы' - 17: 3, # 'ь' - 30: 1, # 'э' - 27: 3, # 'ю' - 16: 3, # 'я' - }, - 12: { # 'м' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 2, # 'г' - 13: 1, # 'д' - 2: 3, # 'е' - 24: 1, # 'ж' - 20: 1, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 3, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 2, # 'х' - 28: 2, # 'ц' - 22: 2, # 'ч' - 25: 1, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 3, # 'ы' - 17: 2, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 3, # 'я' - }, - 5: { # 'н' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' 
- 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 1, # 'п' - 9: 2, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 2, # 'х' - 28: 3, # 'ц' - 22: 3, # 'ч' - 25: 2, # 'ш' - 29: 2, # 'щ' - 54: 1, # 'ъ' - 18: 3, # 'ы' - 17: 3, # 'ь' - 30: 1, # 'э' - 27: 3, # 'ю' - 16: 3, # 'я' - }, - 1: { # 'о' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 3, # 'б' - 10: 3, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 3, # 'з' - 4: 3, # 'и' - 23: 3, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 2, # 'у' - 39: 2, # 'ф' - 26: 3, # 'х' - 28: 2, # 'ц' - 22: 3, # 'ч' - 25: 3, # 'ш' - 29: 3, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 2, # 'э' - 27: 3, # 'ю' - 16: 3, # 'я' - }, - 15: { # 'п' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 3, # 'л' - 12: 1, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 3, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 1, # 'ф' - 26: 0, # 'х' - 28: 2, # 'ц' - 22: 2, # 'ч' - 25: 1, # 'ш' - 29: 1, # 'щ' - 54: 0, # 'ъ' - 18: 3, # 'ы' - 17: 2, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 3, # 'я' - }, - 9: { # 'р' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 3, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 2, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 2, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 3, # 'х' - 28: 2, # 'ц' - 22: 2, # 'ч' - 25: 3, # 'ш' - 29: 2, # 'щ' - 54: 0, # 'ъ' - 18: 3, # 'ы' - 17: 3, # 'ь' - 30: 2, # 'э' - 27: 2, # 
'ю' - 16: 3, # 'я' - }, - 7: { # 'с' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 1, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 3, # 'в' - 19: 2, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 3, # 'х' - 28: 2, # 'ц' - 22: 3, # 'ч' - 25: 2, # 'ш' - 29: 1, # 'щ' - 54: 2, # 'ъ' - 18: 3, # 'ы' - 17: 3, # 'ь' - 30: 2, # 'э' - 27: 3, # 'ю' - 16: 3, # 'я' - }, - 6: { # 'т' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 2, # 'б' - 10: 3, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 1, # 'ж' - 20: 1, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 2, # 'х' - 28: 2, # 'ц' - 22: 2, # 'ч' - 25: 2, # 'ш' - 29: 2, # 'щ' - 54: 2, # 'ъ' - 18: 3, # 'ы' - 17: 3, # 'ь' - 30: 2, # 'э' - 27: 2, # 'ю' - 16: 3, # 'я' - }, - 14: { # 'у' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 3, # 'б' - 10: 3, # 'в' - 19: 3, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 3, # 'з' - 4: 2, # 'и' - 23: 2, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 2, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 1, # 'у' - 39: 2, # 'ф' - 26: 3, # 'х' - 28: 2, # 'ц' - 22: 3, # 'ч' - 25: 3, # 'ш' - 29: 3, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 2, # 'э' - 27: 3, # 'ю' - 16: 2, # 'я' - }, - 39: { # 'ф' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 0, # 'в' - 19: 1, # 'г' - 13: 0, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 1, # 
'м' - 5: 1, # 'н' - 1: 3, # 'о' - 15: 1, # 'п' - 9: 2, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 2, # 'у' - 39: 2, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 1, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 2, # 'ы' - 17: 1, # 'ь' - 30: 2, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 26: { # 'х' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 0, # 'б' - 10: 3, # 'в' - 19: 1, # 'г' - 13: 1, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 1, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 1, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 1, # 'п' - 9: 3, # 'р' - 7: 2, # 'с' - 6: 2, # 'т' - 14: 2, # 'у' - 39: 1, # 'ф' - 26: 1, # 'х' - 28: 1, # 'ц' - 22: 1, # 'ч' - 25: 2, # 'ш' - 29: 0, # 'щ' - 54: 1, # 'ъ' - 18: 0, # 'ы' - 17: 1, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 28: { # 'ц' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 2, # 'в' - 19: 1, # 'г' - 13: 1, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 1, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 2, # 'к' - 8: 1, # 'л' - 12: 1, # 'м' - 5: 1, # 'н' - 1: 3, # 'о' - 15: 0, # 'п' - 9: 1, # 'р' - 7: 0, # 'с' - 6: 1, # 'т' - 14: 3, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 1, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 3, # 'ы' - 17: 1, # 'ь' - 30: 0, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 22: { # 'ч' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 1, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 3, # 'е' - 24: 1, # 'ж' - 20: 0, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 2, # 'л' - 12: 1, # 'м' - 5: 3, # 'н' - 1: 2, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 1, # 'с' - 6: 3, # 'т' - 14: 3, # 'у' - 39: 1, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 1, # 'ч' - 25: 2, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 3, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 25: { # 'ш' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 
'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 1, # 'б' - 10: 2, # 'в' - 19: 1, # 'г' - 13: 0, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 2, # 'м' - 5: 3, # 'н' - 1: 3, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 1, # 'с' - 6: 2, # 'т' - 14: 3, # 'у' - 39: 2, # 'ф' - 26: 1, # 'х' - 28: 1, # 'ц' - 22: 1, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 3, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 0, # 'я' - }, - 29: { # 'щ' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 3, # 'а' - 21: 0, # 'б' - 10: 1, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 3, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 3, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 1, # 'м' - 5: 2, # 'н' - 1: 1, # 'о' - 15: 0, # 'п' - 9: 2, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 2, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 2, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 0, # 'я' - }, - 54: { # 'ъ' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 0, # 'б' - 10: 0, # 'в' - 19: 0, # 'г' - 13: 0, # 'д' - 2: 2, # 'е' - 24: 0, # 'ж' - 20: 0, # 'з' - 4: 0, # 'и' - 23: 0, # 'й' - 11: 0, # 'к' - 8: 0, # 'л' - 12: 0, # 'м' - 5: 0, # 'н' - 1: 0, # 'о' - 15: 0, # 'п' - 9: 0, # 'р' - 7: 0, # 'с' - 6: 0, # 'т' - 14: 0, # 'у' - 39: 0, # 'ф' - 26: 0, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 0, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 1, # 'ю' - 16: 2, # 'я' - }, - 18: { # 'ы' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 3, # 'б' - 10: 3, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 2, # 'и' - 23: 3, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 1, # 'о' - 15: 3, # 'п' - 9: 3, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 1, # 'у' - 39: 0, # 'ф' - 26: 3, # 'х' - 28: 2, # 'ц' - 22: 3, # 'ч' - 25: 3, # 'ш' - 29: 2, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 0, # 'ю' - 16: 2, # 'я' - }, - 17: { # 'ь' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' 
- 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 2, # 'б' - 10: 2, # 'в' - 19: 2, # 'г' - 13: 2, # 'д' - 2: 3, # 'е' - 24: 1, # 'ж' - 20: 3, # 'з' - 4: 2, # 'и' - 23: 0, # 'й' - 11: 3, # 'к' - 8: 0, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 2, # 'о' - 15: 2, # 'п' - 9: 1, # 'р' - 7: 3, # 'с' - 6: 2, # 'т' - 14: 0, # 'у' - 39: 2, # 'ф' - 26: 1, # 'х' - 28: 2, # 'ц' - 22: 2, # 'ч' - 25: 3, # 'ш' - 29: 2, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 3, # 'ю' - 16: 3, # 'я' - }, - 30: { # 'э' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 1, # 'М' - 31: 1, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 1, # 'Р' - 32: 1, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 1, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 1, # 'б' - 10: 1, # 'в' - 19: 1, # 'г' - 13: 2, # 'д' - 2: 1, # 'е' - 24: 0, # 'ж' - 20: 1, # 'з' - 4: 0, # 'и' - 23: 2, # 'й' - 11: 2, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 2, # 'н' - 1: 0, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 2, # 'с' - 6: 3, # 'т' - 14: 1, # 'у' - 39: 2, # 'ф' - 26: 1, # 'х' - 28: 0, # 'ц' - 22: 0, # 'ч' - 25: 1, # 'ш' - 29: 0, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 1, # 'ю' - 16: 1, # 'я' - }, - 27: { # 'ю' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 2, # 'а' - 21: 3, # 'б' - 10: 1, # 'в' - 19: 2, # 'г' - 13: 3, # 'д' - 2: 1, # 'е' - 24: 2, # 'ж' - 20: 2, # 'з' - 4: 1, # 'и' - 23: 1, # 'й' - 11: 2, # 'к' - 8: 2, # 'л' - 12: 2, # 'м' - 5: 2, # 'н' - 1: 1, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 0, # 'у' - 39: 1, # 'ф' - 26: 2, # 'х' - 28: 2, # 'ц' - 22: 2, # 'ч' - 25: 2, # 'ш' - 29: 3, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 1, # 'э' - 27: 2, # 'ю' - 16: 1, # 'я' - }, - 16: { # 'я' - 37: 0, # 'А' - 44: 0, # 'Б' - 33: 0, # 'В' - 46: 0, # 'Г' - 41: 0, # 'Д' - 48: 0, # 'Е' - 56: 0, # 'Ж' - 51: 0, # 'З' - 42: 0, # 'И' - 60: 0, # 'Й' - 36: 0, # 'К' - 49: 0, # 'Л' - 38: 0, # 'М' - 31: 0, # 'Н' - 34: 0, # 'О' - 35: 0, # 'П' - 45: 0, # 'Р' - 32: 0, # 'С' - 40: 0, # 'Т' - 52: 0, # 'У' - 53: 0, # 'Ф' - 55: 0, # 'Х' - 58: 0, # 'Ц' - 50: 0, # 'Ч' - 57: 0, # 'Ш' - 63: 0, # 'Щ' - 62: 0, # 'Ы' - 61: 0, # 'Ь' - 47: 0, # 'Э' - 59: 0, # 'Ю' - 43: 0, # 'Я' - 3: 0, # 'а' - 21: 2, # 'б' - 10: 3, # 'в' - 19: 2, # 'г' - 13: 3, # 'д' - 2: 3, # 'е' - 24: 3, # 'ж' - 20: 3, # 'з' - 4: 2, # 'и' - 23: 2, # 'й' - 11: 3, # 'к' - 8: 3, # 'л' - 12: 3, # 'м' - 5: 3, # 'н' - 1: 0, # 'о' - 15: 2, # 'п' - 9: 2, # 'р' - 7: 3, # 'с' - 6: 3, # 'т' - 14: 1, # 'у' - 39: 1, # 'ф' - 26: 3, # 'х' - 28: 2, # 'ц' - 22: 2, 
# 'ч' - 25: 2, # 'ш' - 29: 3, # 'щ' - 54: 0, # 'ъ' - 18: 0, # 'ы' - 17: 0, # 'ь' - 30: 0, # 'э' - 27: 2, # 'ю' - 16: 2, # 'я' - }, -} - -# 255: Undefined characters that did not exist in training text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 -# 251: Control characters - -# Character Mapping Table(s): -IBM866_RUSSIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' - 64: 253, # '@' - 65: 142, # 'A' - 66: 143, # 'B' - 67: 144, # 'C' - 68: 145, # 'D' - 69: 146, # 'E' - 70: 147, # 'F' - 71: 148, # 'G' - 72: 149, # 'H' - 73: 150, # 'I' - 74: 151, # 'J' - 75: 152, # 'K' - 76: 74, # 'L' - 77: 153, # 'M' - 78: 75, # 'N' - 79: 154, # 'O' - 80: 155, # 'P' - 81: 156, # 'Q' - 82: 157, # 'R' - 83: 158, # 'S' - 84: 159, # 'T' - 85: 160, # 'U' - 86: 161, # 'V' - 87: 162, # 'W' - 88: 163, # 'X' - 89: 164, # 'Y' - 90: 165, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 71, # 'a' - 98: 172, # 'b' - 99: 66, # 'c' - 100: 173, # 'd' - 101: 65, # 'e' - 102: 174, # 'f' - 103: 76, # 'g' - 104: 175, # 'h' - 105: 64, # 'i' - 106: 176, # 'j' - 107: 177, # 'k' - 108: 77, # 'l' - 109: 72, # 'm' - 110: 178, # 'n' - 111: 69, # 'o' - 112: 67, # 'p' - 113: 179, # 'q' - 114: 78, # 'r' - 115: 73, # 's' - 116: 180, # 't' - 117: 181, # 'u' - 118: 79, # 'v' - 119: 182, # 'w' - 120: 183, # 'x' - 121: 184, # 'y' - 122: 185, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 37, # 'А' - 129: 44, # 'Б' - 130: 33, # 'В' - 131: 46, # 'Г' - 132: 41, # 'Д' - 133: 48, # 'Е' - 134: 56, # 'Ж' - 135: 51, # 'З' - 136: 42, # 'И' - 137: 60, # 'Й' - 138: 36, # 'К' - 139: 49, # 'Л' - 140: 38, # 'М' - 141: 31, # 'Н' - 142: 34, # 'О' - 143: 35, # 'П' - 144: 45, # 'Р' - 145: 32, # 'С' - 146: 40, # 'Т' - 147: 52, # 'У' - 148: 53, # 'Ф' - 149: 55, # 'Х' - 150: 58, # 'Ц' - 151: 50, # 'Ч' - 152: 57, # 'Ш' - 153: 63, # 'Щ' - 154: 70, # 'Ъ' - 155: 62, # 'Ы' - 156: 61, # 'Ь' - 157: 47, # 'Э' - 158: 59, # 'Ю' - 159: 43, # 'Я' - 160: 3, # 'а' - 161: 21, # 'б' - 162: 10, # 'в' - 163: 19, # 'г' - 164: 13, # 'д' - 165: 2, # 'е' - 166: 24, # 'ж' - 167: 20, # 'з' - 168: 4, # 'и' - 169: 23, # 'й' - 170: 11, # 'к' - 171: 8, # 'л' - 172: 12, # 'м' - 173: 5, # 'н' - 174: 1, # 'о' - 175: 15, # 'п' - 176: 191, # '░' - 177: 192, # '▒' - 178: 193, # '▓' - 179: 194, # '│' - 180: 
195, # '┤' - 181: 196, # '╡' - 182: 197, # '╢' - 183: 198, # '╖' - 184: 199, # '╕' - 185: 200, # '╣' - 186: 201, # '║' - 187: 202, # '╗' - 188: 203, # '╝' - 189: 204, # '╜' - 190: 205, # '╛' - 191: 206, # '┐' - 192: 207, # '└' - 193: 208, # '┴' - 194: 209, # '┬' - 195: 210, # '├' - 196: 211, # '─' - 197: 212, # '┼' - 198: 213, # '╞' - 199: 214, # '╟' - 200: 215, # '╚' - 201: 216, # '╔' - 202: 217, # '╩' - 203: 218, # '╦' - 204: 219, # '╠' - 205: 220, # '═' - 206: 221, # '╬' - 207: 222, # '╧' - 208: 223, # '╨' - 209: 224, # '╤' - 210: 225, # '╥' - 211: 226, # '╙' - 212: 227, # '╘' - 213: 228, # '╒' - 214: 229, # '╓' - 215: 230, # '╫' - 216: 231, # '╪' - 217: 232, # '┘' - 218: 233, # '┌' - 219: 234, # '█' - 220: 235, # '▄' - 221: 236, # '▌' - 222: 237, # '▐' - 223: 238, # '▀' - 224: 9, # 'р' - 225: 7, # 'с' - 226: 6, # 'т' - 227: 14, # 'у' - 228: 39, # 'ф' - 229: 26, # 'х' - 230: 28, # 'ц' - 231: 22, # 'ч' - 232: 25, # 'ш' - 233: 29, # 'щ' - 234: 54, # 'ъ' - 235: 18, # 'ы' - 236: 17, # 'ь' - 237: 30, # 'э' - 238: 27, # 'ю' - 239: 16, # 'я' - 240: 239, # 'Ё' - 241: 68, # 'ё' - 242: 240, # 'Є' - 243: 241, # 'є' - 244: 242, # 'Ї' - 245: 243, # 'ї' - 246: 244, # 'Ў' - 247: 245, # 'ў' - 248: 246, # '°' - 249: 247, # '∙' - 250: 248, # '·' - 251: 249, # '√' - 252: 250, # '№' - 253: 251, # '¤' - 254: 252, # '■' - 255: 255, # '\xa0' -} - -IBM866_RUSSIAN_MODEL = SingleByteCharSetModel( - charset_name="IBM866", - language="Russian", - char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER, - language_model=RUSSIAN_LANG_MODEL, - typical_positive_ratio=0.976601, - keep_ascii_letters=False, - alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё", -) - -WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 142, # 'A' - 66: 143, # 'B' - 67: 144, # 'C' - 68: 145, # 'D' - 69: 146, # 'E' - 70: 147, # 'F' - 71: 148, # 'G' - 72: 149, # 'H' - 73: 150, # 'I' - 74: 151, # 'J' - 75: 152, # 'K' - 76: 74, # 'L' - 77: 153, # 'M' - 78: 75, # 'N' - 79: 154, # 'O' - 80: 155, # 'P' - 81: 156, # 'Q' - 82: 157, # 'R' - 83: 158, # 'S' - 84: 159, # 'T' - 85: 160, # 'U' - 86: 161, # 'V' - 87: 162, # 'W' - 88: 163, # 'X' - 89: 164, # 'Y' - 90: 165, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 71, # 'a' - 98: 172, # 'b' - 99: 66, # 'c' - 100: 173, # 'd' - 101: 65, # 'e' - 102: 174, # 'f' - 103: 76, # 'g' - 104: 175, # 'h' - 105: 64, # 'i' - 106: 176, # 'j' - 107: 177, # 'k' - 108: 77, # 'l' - 109: 72, # 'm' - 110: 178, # 'n' - 111: 69, # 'o' - 112: 67, # 'p' - 113: 179, # 'q' - 114: 78, # 'r' - 115: 73, # 's' - 116: 180, # 't' - 117: 181, # 'u' - 118: 79, # 'v' - 119: 182, # 'w' - 120: 183, # 'x' - 121: 184, # 'y' - 122: 185, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 191, # 'Ђ' - 129: 192, # 'Ѓ' - 130: 193, # '‚' - 131: 194, # 'ѓ' - 132: 195, # '„' - 133: 196, # '…' - 134: 197, # '†' - 135: 198, # '‡' - 136: 199, # '€' - 137: 200, # '‰' - 138: 201, # 'Љ' - 139: 202, # '‹' - 140: 203, # 'Њ' - 141: 204, # 'Ќ' - 142: 205, # 'Ћ' - 143: 206, # 'Џ' - 144: 207, # 'ђ' - 145: 208, # '‘' - 146: 209, # '’' - 147: 210, # '“' - 148: 211, # '”' - 149: 212, # '•' - 150: 213, # '–' - 151: 214, # '—' - 152: 215, # None - 153: 216, # '™' - 154: 217, # 'љ' - 155: 218, # '›' - 156: 219, # 'њ' - 157: 220, # 'ќ' - 158: 221, # 'ћ' - 159: 222, # 'џ' - 160: 223, # '\xa0' - 161: 224, # 'Ў' - 162: 225, # 'ў' - 163: 226, # 'Ј' - 164: 227, # '¤' - 165: 228, # 'Ґ' - 166: 229, # '¦' - 167: 230, # '§' - 168: 231, # 'Ё' - 169: 232, # '©' - 170: 233, # 'Є' - 171: 234, # '«' - 172: 235, # '¬' - 173: 236, # '\xad' - 174: 237, # '®' - 175: 238, # 'Ї' - 176: 239, # '°' - 177: 240, # '±' - 178: 241, # 'І' - 179: 242, # 'і' - 180: 243, # 'ґ' - 181: 244, # 'µ' - 182: 245, # '¶' - 183: 246, # '·' - 184: 68, # 'ё' - 185: 247, # '№' - 186: 248, # 'є' - 187: 249, # '»' - 188: 250, # 'ј' - 189: 251, # 'Ѕ' - 190: 252, # 'ѕ' - 191: 253, # 'ї' - 192: 37, # 'А' - 193: 44, # 'Б' - 194: 33, # 'В' - 195: 46, # 'Г' - 196: 41, # 'Д' - 197: 48, # 'Е' - 198: 56, # 'Ж' - 199: 51, # 'З' - 200: 42, # 'И' - 201: 60, # 'Й' - 202: 36, # 'К' - 203: 49, # 'Л' - 204: 38, # 'М' - 205: 31, # 'Н' - 206: 34, # 'О' - 207: 35, # 'П' - 208: 45, # 'Р' - 209: 32, # 'С' - 210: 40, # 'Т' - 211: 52, # 'У' - 212: 53, # 'Ф' - 213: 55, # 'Х' - 214: 58, # 'Ц' - 215: 50, # 'Ч' - 216: 57, # 'Ш' - 217: 63, # 'Щ' - 218: 70, # 'Ъ' - 219: 62, # 'Ы' - 220: 61, # 'Ь' - 221: 47, # 'Э' - 222: 59, # 'Ю' - 223: 43, # 'Я' - 224: 3, # 'а' - 225: 21, # 'б' - 226: 10, # 'в' - 227: 19, # 'г' - 228: 13, # 'д' - 229: 2, # 'е' - 230: 24, # 'ж' - 231: 20, # 'з' - 232: 4, # 'и' - 233: 23, # 'й' - 234: 11, # 'к' - 235: 8, # 'л' - 236: 12, # 'м' - 237: 5, # 'н' - 238: 1, # 'о' - 239: 15, # 'п' - 240: 9, # 'р' - 241: 7, # 'с' - 242: 6, # 'т' - 243: 14, # 'у' - 244: 39, # 'ф' - 245: 26, # 'х' - 246: 28, # 'ц' - 247: 22, # 'ч' - 248: 25, # 'ш' - 249: 29, # 'щ' - 250: 54, # 'ъ' - 251: 18, # 'ы' - 252: 17, # 'ь' - 253: 30, # 'э' - 254: 27, # 'ю' - 255: 16, # 'я' -} - -WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel( - charset_name="windows-1251", - language="Russian", - char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER, - language_model=RUSSIAN_LANG_MODEL, - 
typical_positive_ratio=0.976601, - keep_ascii_letters=False, - alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё", -) - -IBM855_RUSSIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' - 64: 253, # '@' - 65: 142, # 'A' - 66: 143, # 'B' - 67: 144, # 'C' - 68: 145, # 'D' - 69: 146, # 'E' - 70: 147, # 'F' - 71: 148, # 'G' - 72: 149, # 'H' - 73: 150, # 'I' - 74: 151, # 'J' - 75: 152, # 'K' - 76: 74, # 'L' - 77: 153, # 'M' - 78: 75, # 'N' - 79: 154, # 'O' - 80: 155, # 'P' - 81: 156, # 'Q' - 82: 157, # 'R' - 83: 158, # 'S' - 84: 159, # 'T' - 85: 160, # 'U' - 86: 161, # 'V' - 87: 162, # 'W' - 88: 163, # 'X' - 89: 164, # 'Y' - 90: 165, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 71, # 'a' - 98: 172, # 'b' - 99: 66, # 'c' - 100: 173, # 'd' - 101: 65, # 'e' - 102: 174, # 'f' - 103: 76, # 'g' - 104: 175, # 'h' - 105: 64, # 'i' - 106: 176, # 'j' - 107: 177, # 'k' - 108: 77, # 'l' - 109: 72, # 'm' - 110: 178, # 'n' - 111: 69, # 'o' - 112: 67, # 'p' - 113: 179, # 'q' - 114: 78, # 'r' - 115: 73, # 's' - 116: 180, # 't' - 117: 181, # 'u' - 118: 79, # 'v' - 119: 182, # 'w' - 120: 183, # 'x' - 121: 184, # 'y' - 122: 185, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 191, # 'ђ' - 129: 192, # 'Ђ' - 130: 193, # 'ѓ' - 131: 194, # 'Ѓ' - 132: 68, # 'ё' - 133: 195, # 'Ё' - 134: 196, # 'є' - 135: 197, # 'Є' - 136: 198, # 'ѕ' - 137: 199, # 'Ѕ' - 138: 200, # 'і' - 139: 201, # 'І' - 140: 202, # 'ї' - 141: 203, # 'Ї' - 142: 204, # 'ј' - 143: 205, # 'Ј' - 144: 206, # 'љ' - 145: 207, # 'Љ' - 146: 208, # 'њ' - 147: 209, # 'Њ' - 148: 210, # 'ћ' - 149: 211, # 'Ћ' - 150: 212, # 'ќ' - 151: 213, # 'Ќ' - 152: 214, # 'ў' - 153: 215, # 'Ў' - 154: 216, # 'џ' - 155: 217, # 'Џ' - 156: 27, # 'ю' - 157: 59, # 'Ю' - 158: 54, # 'ъ' - 159: 70, # 'Ъ' - 160: 3, # 'а' - 161: 37, # 'А' - 162: 21, # 'б' - 163: 44, # 'Б' - 164: 28, # 'ц' - 165: 58, # 'Ц' - 166: 13, # 'д' - 167: 41, # 'Д' - 168: 2, # 'е' - 169: 48, # 'Е' - 170: 39, # 'ф' - 171: 53, # 'Ф' - 172: 19, # 'г' - 173: 46, # 'Г' - 174: 218, # '«' - 175: 219, # '»' - 176: 220, # '░' - 177: 221, # '▒' - 178: 222, # '▓' - 179: 223, # '│' - 180: 224, # '┤' - 181: 26, # 'х' - 182: 55, # 'Х' - 183: 4, # 'и' - 184: 42, # 'И' - 185: 225, # '╣' - 186: 226, # '║' - 187: 227, # '╗' - 188: 228, # '╝' - 189: 23, # 'й' - 190: 60, 
# 'Й' - 191: 229, # '┐' - 192: 230, # '└' - 193: 231, # '┴' - 194: 232, # '┬' - 195: 233, # '├' - 196: 234, # '─' - 197: 235, # '┼' - 198: 11, # 'к' - 199: 36, # 'К' - 200: 236, # '╚' - 201: 237, # '╔' - 202: 238, # '╩' - 203: 239, # '╦' - 204: 240, # '╠' - 205: 241, # '═' - 206: 242, # '╬' - 207: 243, # '¤' - 208: 8, # 'л' - 209: 49, # 'Л' - 210: 12, # 'м' - 211: 38, # 'М' - 212: 5, # 'н' - 213: 31, # 'Н' - 214: 1, # 'о' - 215: 34, # 'О' - 216: 15, # 'п' - 217: 244, # '┘' - 218: 245, # '┌' - 219: 246, # '█' - 220: 247, # '▄' - 221: 35, # 'П' - 222: 16, # 'я' - 223: 248, # '▀' - 224: 43, # 'Я' - 225: 9, # 'р' - 226: 45, # 'Р' - 227: 7, # 'с' - 228: 32, # 'С' - 229: 6, # 'т' - 230: 40, # 'Т' - 231: 14, # 'у' - 232: 52, # 'У' - 233: 24, # 'ж' - 234: 56, # 'Ж' - 235: 10, # 'в' - 236: 33, # 'В' - 237: 17, # 'ь' - 238: 61, # 'Ь' - 239: 249, # '№' - 240: 250, # '\xad' - 241: 18, # 'ы' - 242: 62, # 'Ы' - 243: 20, # 'з' - 244: 51, # 'З' - 245: 25, # 'ш' - 246: 57, # 'Ш' - 247: 30, # 'э' - 248: 47, # 'Э' - 249: 29, # 'щ' - 250: 63, # 'Щ' - 251: 22, # 'ч' - 252: 50, # 'Ч' - 253: 251, # '§' - 254: 252, # '■' - 255: 255, # '\xa0' -} - -IBM855_RUSSIAN_MODEL = SingleByteCharSetModel( - charset_name="IBM855", - language="Russian", - char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER, - language_model=RUSSIAN_LANG_MODEL, - typical_positive_ratio=0.976601, - keep_ascii_letters=False, - alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё", -) - -KOI8_R_RUSSIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 142, # 'A' - 66: 143, # 'B' - 67: 144, # 'C' - 68: 145, # 'D' - 69: 146, # 'E' - 70: 147, # 'F' - 71: 148, # 'G' - 72: 149, # 'H' - 73: 150, # 'I' - 74: 151, # 'J' - 75: 152, # 'K' - 76: 74, # 'L' - 77: 153, # 'M' - 78: 75, # 'N' - 79: 154, # 'O' - 80: 155, # 'P' - 81: 156, # 'Q' - 82: 157, # 'R' - 83: 158, # 'S' - 84: 159, # 'T' - 85: 160, # 'U' - 86: 161, # 'V' - 87: 162, # 'W' - 88: 163, # 'X' - 89: 164, # 'Y' - 90: 165, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 71, # 'a' - 98: 172, # 'b' - 99: 66, # 'c' - 100: 173, # 'd' - 101: 65, # 'e' - 102: 174, # 'f' - 103: 76, # 'g' - 104: 175, # 'h' - 105: 64, # 'i' - 106: 176, # 'j' - 107: 177, # 'k' - 108: 77, # 'l' - 109: 72, # 'm' - 110: 178, # 'n' - 111: 69, # 'o' - 112: 67, # 'p' - 113: 179, # 'q' - 114: 78, # 'r' - 115: 73, # 's' - 116: 180, # 't' - 117: 181, # 'u' - 118: 79, # 'v' - 119: 182, # 'w' - 120: 183, # 'x' - 121: 184, # 'y' - 122: 185, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 191, # '─' - 129: 192, # '│' - 130: 193, # '┌' - 131: 194, # '┐' - 132: 195, # '└' - 133: 196, # '┘' - 134: 197, # '├' - 135: 198, # '┤' - 136: 199, # '┬' - 137: 200, # '┴' - 138: 201, # '┼' - 139: 202, # '▀' - 140: 203, # '▄' - 141: 204, # '█' - 142: 205, # '▌' - 143: 206, # '▐' - 144: 207, # '░' - 145: 208, # '▒' - 146: 209, # '▓' - 147: 210, # '⌠' - 148: 211, # '■' - 149: 212, # '∙' - 150: 213, # '√' - 151: 214, # '≈' - 152: 215, # '≤' - 153: 216, # '≥' - 154: 217, # '\xa0' - 155: 218, # '⌡' - 156: 219, # '°' - 157: 220, # '²' - 158: 221, # '·' - 159: 222, # '÷' - 160: 223, # '═' - 161: 224, # '║' - 162: 225, # '╒' - 163: 68, # 'ё' - 164: 226, # '╓' - 165: 227, # '╔' - 166: 228, # '╕' - 167: 229, # '╖' - 168: 230, # '╗' - 169: 231, # '╘' - 170: 232, # '╙' - 171: 233, # '╚' - 172: 234, # '╛' - 173: 235, # '╜' - 174: 236, # '╝' - 175: 237, # '╞' - 176: 238, # '╟' - 177: 239, # '╠' - 178: 240, # '╡' - 179: 241, # 'Ё' - 180: 242, # '╢' - 181: 243, # '╣' - 182: 244, # '╤' - 183: 245, # '╥' - 184: 246, # '╦' - 185: 247, # '╧' - 186: 248, # '╨' - 187: 249, # '╩' - 188: 250, # '╪' - 189: 251, # '╫' - 190: 252, # '╬' - 191: 253, # '©' - 192: 27, # 'ю' - 193: 3, # 'а' - 194: 21, # 'б' - 195: 28, # 'ц' - 196: 13, # 'д' - 197: 2, # 'е' - 198: 39, # 'ф' - 199: 19, # 'г' - 200: 26, # 'х' - 201: 4, # 'и' - 202: 23, # 'й' - 203: 11, # 'к' - 204: 8, # 'л' - 205: 12, # 'м' - 206: 5, # 'н' - 207: 1, # 'о' - 208: 15, # 'п' - 209: 16, # 'я' - 210: 9, # 'р' - 211: 7, # 'с' - 212: 6, # 'т' - 213: 14, # 'у' - 214: 24, # 'ж' - 215: 10, # 'в' - 216: 17, # 'ь' - 217: 18, # 'ы' - 218: 20, # 'з' - 219: 25, # 'ш' - 220: 30, # 'э' - 221: 29, # 'щ' - 222: 22, # 'ч' - 223: 54, # 'ъ' - 224: 59, # 'Ю' - 225: 37, # 'А' - 226: 44, # 'Б' - 227: 58, # 'Ц' - 228: 41, # 'Д' - 229: 48, # 'Е' - 230: 53, # 'Ф' - 231: 46, # 'Г' - 232: 55, # 'Х' - 233: 42, # 'И' - 234: 60, # 'Й' - 235: 36, # 'К' - 236: 49, # 'Л' - 237: 38, # 'М' - 238: 31, # 'Н' - 239: 34, # 'О' - 240: 35, # 'П' - 241: 43, # 'Я' - 242: 45, # 'Р' - 243: 32, # 'С' - 244: 40, # 'Т' - 245: 52, # 'У' - 246: 56, # 'Ж' - 247: 33, # 'В' - 248: 61, # 'Ь' - 249: 62, # 'Ы' - 250: 51, # 'З' - 251: 57, # 'Ш' - 252: 47, # 'Э' - 253: 63, # 'Щ' - 254: 50, # 'Ч' - 255: 70, # 'Ъ' -} - -KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel( - charset_name="KOI8-R", - language="Russian", - char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER, - language_model=RUSSIAN_LANG_MODEL, - 
typical_positive_ratio=0.976601, - keep_ascii_letters=False, - alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё", -) - -MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' - 64: 253, # '@' - 65: 142, # 'A' - 66: 143, # 'B' - 67: 144, # 'C' - 68: 145, # 'D' - 69: 146, # 'E' - 70: 147, # 'F' - 71: 148, # 'G' - 72: 149, # 'H' - 73: 150, # 'I' - 74: 151, # 'J' - 75: 152, # 'K' - 76: 74, # 'L' - 77: 153, # 'M' - 78: 75, # 'N' - 79: 154, # 'O' - 80: 155, # 'P' - 81: 156, # 'Q' - 82: 157, # 'R' - 83: 158, # 'S' - 84: 159, # 'T' - 85: 160, # 'U' - 86: 161, # 'V' - 87: 162, # 'W' - 88: 163, # 'X' - 89: 164, # 'Y' - 90: 165, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 71, # 'a' - 98: 172, # 'b' - 99: 66, # 'c' - 100: 173, # 'd' - 101: 65, # 'e' - 102: 174, # 'f' - 103: 76, # 'g' - 104: 175, # 'h' - 105: 64, # 'i' - 106: 176, # 'j' - 107: 177, # 'k' - 108: 77, # 'l' - 109: 72, # 'm' - 110: 178, # 'n' - 111: 69, # 'o' - 112: 67, # 'p' - 113: 179, # 'q' - 114: 78, # 'r' - 115: 73, # 's' - 116: 180, # 't' - 117: 181, # 'u' - 118: 79, # 'v' - 119: 182, # 'w' - 120: 183, # 'x' - 121: 184, # 'y' - 122: 185, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 37, # 'А' - 129: 44, # 'Б' - 130: 33, # 'В' - 131: 46, # 'Г' - 132: 41, # 'Д' - 133: 48, # 'Е' - 134: 56, # 'Ж' - 135: 51, # 'З' - 136: 42, # 'И' - 137: 60, # 'Й' - 138: 36, # 'К' - 139: 49, # 'Л' - 140: 38, # 'М' - 141: 31, # 'Н' - 142: 34, # 'О' - 143: 35, # 'П' - 144: 45, # 'Р' - 145: 32, # 'С' - 146: 40, # 'Т' - 147: 52, # 'У' - 148: 53, # 'Ф' - 149: 55, # 'Х' - 150: 58, # 'Ц' - 151: 50, # 'Ч' - 152: 57, # 'Ш' - 153: 63, # 'Щ' - 154: 70, # 'Ъ' - 155: 62, # 'Ы' - 156: 61, # 'Ь' - 157: 47, # 'Э' - 158: 59, # 'Ю' - 159: 43, # 'Я' - 160: 191, # '†' - 161: 192, # '°' - 162: 193, # 'Ґ' - 163: 194, # '£' - 164: 195, # '§' - 165: 196, # '•' - 166: 197, # '¶' - 167: 198, # 'І' - 168: 199, # '®' - 169: 200, # '©' - 170: 201, # '™' - 171: 202, # 'Ђ' - 172: 203, # 'ђ' - 173: 204, # '≠' - 174: 205, # 'Ѓ' - 175: 206, # 'ѓ' - 176: 207, # '∞' - 177: 208, # '±' - 178: 209, # '≤' - 179: 210, # '≥' - 180: 211, # 'і' - 181: 212, # 'µ' - 182: 213, # 'ґ' - 183: 214, # 'Ј' - 184: 215, # 'Є' - 185: 216, # 'є' - 186: 217, # 'Ї' - 187: 218, # 'ї' - 188: 219, # 'Љ' - 189: 220, # 'љ' - 190: 
221, # 'Њ' - 191: 222, # 'њ' - 192: 223, # 'ј' - 193: 224, # 'Ѕ' - 194: 225, # '¬' - 195: 226, # '√' - 196: 227, # 'ƒ' - 197: 228, # '≈' - 198: 229, # '∆' - 199: 230, # '«' - 200: 231, # '»' - 201: 232, # '…' - 202: 233, # '\xa0' - 203: 234, # 'Ћ' - 204: 235, # 'ћ' - 205: 236, # 'Ќ' - 206: 237, # 'ќ' - 207: 238, # 'ѕ' - 208: 239, # '–' - 209: 240, # '—' - 210: 241, # '“' - 211: 242, # '”' - 212: 243, # '‘' - 213: 244, # '’' - 214: 245, # '÷' - 215: 246, # '„' - 216: 247, # 'Ў' - 217: 248, # 'ў' - 218: 249, # 'Џ' - 219: 250, # 'џ' - 220: 251, # '№' - 221: 252, # 'Ё' - 222: 68, # 'ё' - 223: 16, # 'я' - 224: 3, # 'а' - 225: 21, # 'б' - 226: 10, # 'в' - 227: 19, # 'г' - 228: 13, # 'д' - 229: 2, # 'е' - 230: 24, # 'ж' - 231: 20, # 'з' - 232: 4, # 'и' - 233: 23, # 'й' - 234: 11, # 'к' - 235: 8, # 'л' - 236: 12, # 'м' - 237: 5, # 'н' - 238: 1, # 'о' - 239: 15, # 'п' - 240: 9, # 'р' - 241: 7, # 'с' - 242: 6, # 'т' - 243: 14, # 'у' - 244: 39, # 'ф' - 245: 26, # 'х' - 246: 28, # 'ц' - 247: 22, # 'ч' - 248: 25, # 'ш' - 249: 29, # 'щ' - 250: 54, # 'ъ' - 251: 18, # 'ы' - 252: 17, # 'ь' - 253: 30, # 'э' - 254: 27, # 'ю' - 255: 255, # '€' -} - -MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel( - charset_name="MacCyrillic", - language="Russian", - char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER, - language_model=RUSSIAN_LANG_MODEL, - typical_positive_ratio=0.976601, - keep_ascii_letters=False, - alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё", -) - -ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 142, # 'A' - 66: 143, # 'B' - 67: 144, # 'C' - 68: 145, # 'D' - 69: 146, # 'E' - 70: 147, # 'F' - 71: 148, # 'G' - 72: 149, # 'H' - 73: 150, # 'I' - 74: 151, # 'J' - 75: 152, # 'K' - 76: 74, # 'L' - 77: 153, # 'M' - 78: 75, # 'N' - 79: 154, # 'O' - 80: 155, # 'P' - 81: 156, # 'Q' - 82: 157, # 'R' - 83: 158, # 'S' - 84: 159, # 'T' - 85: 160, # 'U' - 86: 161, # 'V' - 87: 162, # 'W' - 88: 163, # 'X' - 89: 164, # 'Y' - 90: 165, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 71, # 'a' - 98: 172, # 'b' - 99: 66, # 'c' - 100: 173, # 'd' - 101: 65, # 'e' - 102: 174, # 'f' - 103: 76, # 'g' - 104: 175, # 'h' - 105: 64, # 'i' - 106: 176, # 'j' - 107: 177, # 'k' - 108: 77, # 'l' - 109: 72, # 'm' - 110: 178, # 'n' - 111: 69, # 'o' - 112: 67, # 'p' - 113: 179, # 'q' - 114: 78, # 'r' - 115: 73, # 's' - 116: 180, # 't' - 117: 181, # 'u' - 118: 79, # 'v' - 119: 182, # 'w' - 120: 183, # 'x' - 121: 184, # 'y' - 122: 185, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 191, # '\x80' - 129: 192, # '\x81' - 130: 193, # '\x82' - 131: 194, # '\x83' - 132: 195, # '\x84' - 133: 196, # '\x85' - 134: 197, # '\x86' - 135: 198, # '\x87' - 136: 199, # '\x88' - 137: 200, # '\x89' - 138: 201, # '\x8a' - 139: 202, # '\x8b' - 140: 203, # '\x8c' - 141: 204, # '\x8d' - 142: 205, # '\x8e' - 143: 206, # '\x8f' - 144: 207, # '\x90' - 145: 208, # '\x91' - 146: 209, # '\x92' - 147: 210, # '\x93' - 148: 211, # '\x94' - 149: 212, # '\x95' - 150: 213, # '\x96' - 151: 214, # '\x97' - 152: 215, # '\x98' - 153: 216, # '\x99' - 154: 217, # '\x9a' - 155: 218, # '\x9b' - 156: 219, # '\x9c' - 157: 220, # '\x9d' - 158: 221, # '\x9e' - 159: 222, # '\x9f' - 160: 223, # '\xa0' - 161: 224, # 'Ё' - 162: 225, # 'Ђ' - 163: 226, # 'Ѓ' - 164: 227, # 'Є' - 165: 228, # 'Ѕ' - 166: 229, # 'І' - 167: 230, # 'Ї' - 168: 231, # 'Ј' - 169: 232, # 'Љ' - 170: 233, # 'Њ' - 171: 234, # 'Ћ' - 172: 235, # 'Ќ' - 173: 236, # '\xad' - 174: 237, # 'Ў' - 175: 238, # 'Џ' - 176: 37, # 'А' - 177: 44, # 'Б' - 178: 33, # 'В' - 179: 46, # 'Г' - 180: 41, # 'Д' - 181: 48, # 'Е' - 182: 56, # 'Ж' - 183: 51, # 'З' - 184: 42, # 'И' - 185: 60, # 'Й' - 186: 36, # 'К' - 187: 49, # 'Л' - 188: 38, # 'М' - 189: 31, # 'Н' - 190: 34, # 'О' - 191: 35, # 'П' - 192: 45, # 'Р' - 193: 32, # 'С' - 194: 40, # 'Т' - 195: 52, # 'У' - 196: 53, # 'Ф' - 197: 55, # 'Х' - 198: 58, # 'Ц' - 199: 50, # 'Ч' - 200: 57, # 'Ш' - 201: 63, # 'Щ' - 202: 70, # 'Ъ' - 203: 62, # 'Ы' - 204: 61, # 'Ь' - 205: 47, # 'Э' - 206: 59, # 'Ю' - 207: 43, # 'Я' - 208: 3, # 'а' - 209: 21, # 'б' - 210: 10, # 'в' - 211: 19, # 'г' - 212: 13, # 'д' - 213: 2, # 'е' - 214: 24, # 'ж' - 215: 20, # 'з' - 216: 4, # 'и' - 217: 23, # 'й' - 218: 11, # 'к' - 219: 8, # 'л' - 220: 12, # 'м' - 221: 5, # 'н' - 222: 1, # 'о' - 223: 15, # 'п' - 224: 9, # 'р' - 225: 7, # 'с' - 226: 6, # 'т' - 227: 14, # 'у' - 228: 39, # 'ф' - 229: 26, # 'х' - 230: 28, # 'ц' - 231: 22, # 'ч' - 232: 25, # 'ш' - 233: 29, # 'щ' - 234: 54, # 'ъ' - 235: 18, # 'ы' - 236: 17, # 'ь' - 237: 30, # 'э' - 238: 27, # 'ю' - 239: 16, # 'я' - 240: 239, # '№' - 241: 68, # 'ё' - 242: 240, # 'ђ' - 243: 241, # 'ѓ' - 244: 242, # 'є' - 245: 243, # 'ѕ' - 246: 244, # 'і' - 247: 245, # 'ї' - 248: 246, # 'ј' - 249: 247, # 'љ' - 250: 248, # 'њ' - 251: 249, # 'ћ' - 252: 250, # 'ќ' - 253: 251, # '§' - 254: 252, # 'ў' - 255: 255, # 'џ' -} - -ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel( - charset_name="ISO-8859-5", - language="Russian", - 
char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER, - language_model=RUSSIAN_LANG_MODEL, - typical_positive_ratio=0.976601, - keep_ascii_letters=False, - alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё", -) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/color_triplet.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/color_triplet.py deleted file mode 100644 index 02cab328251af9bfa809981aaa44933c407e2cd7..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/color_triplet.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import NamedTuple, Tuple - - -class ColorTriplet(NamedTuple): - """The red, green, and blue components of a color.""" - - red: int - """Red component in 0 to 255 range.""" - green: int - """Green component in 0 to 255 range.""" - blue: int - """Blue component in 0 to 255 range.""" - - @property - def hex(self) -> str: - """get the color triplet in CSS style.""" - red, green, blue = self - return f"#{red:02x}{green:02x}{blue:02x}" - - @property - def rgb(self) -> str: - """The color in RGB format. - - Returns: - str: An rgb color, e.g. ``"rgb(100,23,255)"``. - """ - red, green, blue = self - return f"rgb({red},{green},{blue})" - - @property - def normalized(self) -> Tuple[float, float, float]: - """Convert components into floats between 0 and 1. - - Returns: - Tuple[float, float, float]: A tuple of three normalized colour components. - """ - red, green, blue = self - return red / 255.0, green / 255.0, blue / 255.0 diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/after.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/after.py deleted file mode 100644 index 574c9bcea6e222ea8283a3c8dafbda15a2893fe1..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/after.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2016 Julien Danjou -# Copyright 2016 Joshua Harlow -# Copyright 2013-2014 Ray Holder -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import typing - -from pip._vendor.tenacity import _utils - -if typing.TYPE_CHECKING: - import logging - - from pip._vendor.tenacity import RetryCallState - - -def after_nothing(retry_state: "RetryCallState") -> None: - """After call strategy that does nothing.""" - - -def after_log( - logger: "logging.Logger", - log_level: int, - sec_format: str = "%0.3f", -) -> typing.Callable[["RetryCallState"], None]: - """After call strategy that logs to some logger the finished attempt.""" - - def log_it(retry_state: "RetryCallState") -> None: - if retry_state.fn is None: - # NOTE(sileht): can't really happen, but we must please mypy - fn_name = "" - else: - fn_name = _utils.get_callback_name(retry_state.fn) - logger.log( - log_level, - f"Finished call to '{fn_name}' " - f"after {sec_format % retry_state.seconds_since_start}(s), " - f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.", - ) - - return log_it diff --git a/spaces/Audio-AGI/AudioSep/models/CLAP/training/main.py b/spaces/Audio-AGI/AudioSep/models/CLAP/training/main.py deleted file mode 100644 index 3b563a5d001be7adfbe779dee7ad8ac49aadc50d..0000000000000000000000000000000000000000 --- a/spaces/Audio-AGI/AudioSep/models/CLAP/training/main.py +++ /dev/null @@ -1,596 +0,0 @@ -from inspect import getargs -import logging -import os -import random -from datetime import datetime -import bisect -import copy -import numpy as np -import torch -import torch.backends.cudnn as cudnn -from torch import optim -from torch.cuda.amp import GradScaler -import faulthandler -import pathlib - -try: - import wandb -except ImportError: - wandb = None - -try: - import torch.utils.tensorboard as tensorboard -except ImportError: - tensorboard = None - -try: - import horovod.torch as hvd -except ImportError: - hvd = None - -from open_clip import create_model_and_transforms, trace_model, create_model -from training.data import get_data -from training.distributed import is_master, init_distributed_device, world_info_from_env -from training.logger import setup_logging -from training.params import parse_args -from training.scheduler import cosine_lr -from training.train import train_one_epoch, evaluate -from open_clip.utils import dataset_split, get_optimizer - - -def maintain_ckpts(args, startidx, all_idx_len): - for i in reversed(range(startidx, all_idx_len)): - if os.path.exists(os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt")): - os.rename( - os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"), - os.path.join(args.checkpoint_path, f"epoch_top_{i+1}.pt"), - ) - if os.path.exists( - os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt") - ): - os.remove(os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt")) - return - - -def update_top_k_performance( - new_metrics_inputs, current_top_k_ckpt_metrics, args, ckpt, bignumbetter=True -): - """ - Record the top-k performance of the current epoch. 
- current_top_k_metrics is a dictionary of the form: {1: top_1_ckpt_measure, 2: top_2_ckpt_measure, ...} - """ - if isinstance(new_metrics_inputs, (list, tuple)): - new_metrics_inputs = np.mean(new_metrics_inputs) - return update_top_k_performance( - new_metrics_inputs, - current_top_k_ckpt_metrics, - args=args, - ckpt=ckpt, - bignumbetter=bignumbetter, - ) - elif isinstance(new_metrics_inputs, dict): - new_metrics_inputs = np.mean(list(new_metrics_inputs.values())) - return update_top_k_performance( - new_metrics_inputs, - current_top_k_ckpt_metrics, - args=args, - ckpt=ckpt, - bignumbetter=bignumbetter, - ) - elif isinstance(new_metrics_inputs, (float, int)): - update_flag = {k: False for k in current_top_k_ckpt_metrics.keys()} - sorted_keys = sorted(current_top_k_ckpt_metrics.keys()) - sorted_values = sorted( - current_top_k_ckpt_metrics.values(), reverse=bignumbetter - ) - sorted_values_ = copy.deepcopy(sorted_values) - sorted_values.append(new_metrics_inputs) - sorted_values = sorted(sorted_values, reverse=bignumbetter) - sorted_values = sorted_values[:-1] - - if sorted_values == sorted_values_: - return current_top_k_ckpt_metrics, new_metrics_inputs - else: - for i in range(len(sorted_keys)): - if current_top_k_ckpt_metrics[sorted_keys[i]] != sorted_values[i]: - current_top_k_ckpt_metrics[sorted_keys[i]] = sorted_values[i] - update_flag[sorted_keys[i]] = True - for i in range(len(update_flag)): - if update_flag[i]: - maintain_ckpts(args, i, len(sorted_keys)) - torch.save( - ckpt, - os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"), - ) - break - return current_top_k_ckpt_metrics, new_metrics_inputs - - -# def updateifNone(a, b): -# a = b if None else a -# return a - - -def is_pretrained_params(n): - return ( - n.startswith("transformer") - or n in ["positional_embedding", "text_projection"] - or n.startswith("token_embedding") - or n.startswith("ln_final") - or n.startswith("logit_scale_t") - ) - - -def random_seed(seed=42, rank=0): - torch.manual_seed(seed + rank) - np.random.seed(seed + rank) - random.seed(seed + rank) - - -def main(): - args = parse_args() - # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? - args.amodel = args.amodel.replace("/", "-") - # download sizes.json file - - # (yusong): the below two lines are for debug - # print("setting up faulthandler") - # faulthandler.register(10) - - random.seed(args.seed) - torch.manual_seed(args.seed) - torch.cuda.manual_seed(args.seed) - torch.cuda.manual_seed_all(args.seed) - np.random.seed(args.seed) - if args.tmodel == "bert" or args.tmodel == "roberta" or args.tmodel == "bart": - assert ( - args.pretrained == "" or args.pretrained is None - ), "bert/roberta/bart text encoder does not support pretrained models." 
- - # get the name of the experiments - if args.name is None: - args.name = "-".join( - [ - datetime.now().strftime("%Y_%m_%d-%H_%M_%S"), - f"model_{args.amodel}", - f"lr_{args.lr}", - f"b_{args.batch_size}", - f"j_{args.workers}", - f"p_{args.precision}", - ] - ) - - # discover initial world args early so we can log properly - args.distributed = False - args.local_rank, args.rank, args.world_size = world_info_from_env() - - if args.remotedata and is_master(args): - for dataset_name in args.datasetnames: - for split in dataset_split[dataset_name]: - if not os.path.exists(f"./json_files/{dataset_name}/{split}"): - os.makedirs(f"./json_files/{dataset_name}/{split}") - os.system( - f"aws s3 cp s3://s-laion-audio/webdataset_tar/{dataset_name}/{split}/sizes.json ./json_files/{dataset_name}/{split}/sizes.json" - ) - - args.log_path = None - if is_master(args, local=args.log_local): - log_base_path = os.path.join(args.logs, args.name) - os.makedirs(log_base_path, exist_ok=True) - log_filename = f"out-{args.rank}" if args.log_local else "out.log" - args.log_path = os.path.join(log_base_path, log_filename) - if os.path.exists(args.log_path): - print( - "Error. Experiment already exists. Use --name {} to specify a new experiment." - ) - return -1 - - # Set logger - args.log_level = logging.DEBUG if args.debug else logging.INFO - setup_logging(args.log_path, args.log_level) - - # fully initialize distributed device environment - device = init_distributed_device(args) - - args.wandb = "wandb" in args.report_to or "all" in args.report_to - args.tensorboard = "tensorboard" in args.report_to or "all" in args.report_to - if is_master(args): - args.tensorboard_path = ( - os.path.join(args.logs, args.name, "tensorboard") - if args.tensorboard - else "" - ) - args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints") - for dirname in [args.tensorboard_path, args.checkpoint_path]: - if dirname: - os.makedirs(dirname, exist_ok=True) - else: - args.tensorboard_path = "" - args.checkpoint_path = "" - - if args.copy_codebase: - copy_codebase(args) - - assert args.precision in ["amp", "fp16", "fp32"] - if args.precision == "fp16": - logging.warning( - "It is recommended to use AMP mixed-precision instead of FP16. " - "FP16 support needs further verification and tuning, especially for train." - ) - - if args.horovod: - logging.info( - f"Running in horovod mode with multiple processes / nodes. Device: {args.device}." - f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}." - ) - elif args.distributed: - logging.info( - f"Running in distributed mode with multiple processes. Device: {args.device}." - f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}." - ) - else: - logging.info(f"Running with a single process. 
Device {args.device}.") - - logging.info(f"openai cache dir: {os.path.expanduser(args.openai_model_cache_dir)}") - - model, model_cfg = create_model( - args.amodel, - args.tmodel, - args.pretrained, - precision=args.precision, - device=device, - jit=args.torchscript, - force_quick_gelu=args.force_quick_gelu, - openai_model_cache_dir=os.path.expanduser(args.openai_model_cache_dir), - skip_params=True, - pretrained_audio=args.pretrained_audio, - pretrained_text=args.pretrained_text, - enable_fusion=args.enable_fusion, - fusion_type=args.fusion_type, - ) - - if args.horovod: - with torch.no_grad(): - for param in model.parameters(): - param.set_(param.contiguous()) - - if args.trace: - model = trace_model(model, batch_size=args.batch_size, device=device) - - if is_master(args): - logging.info("Model:") - logging.info(f"{str(model)}") - logging.info("Params:") - params_file = os.path.join(args.logs, args.name, "params.txt") - with open(params_file, "w") as f: - for name in sorted(vars(args)): - val = getattr(args, name) - logging.info(f" {name}: {val}") - f.write(f"{name}: {val}\n") - - if args.distributed and not args.horovod: - if args.use_bn_sync: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) - ddp_args = {} - if args.ddp_static_graph: - # this doesn't exist in older PyTorch, arg only added if enabled - ddp_args["static_graph"] = True - model = torch.nn.parallel.DistributedDataParallel( - model, device_ids=[device], find_unused_parameters=True, **ddp_args - ) - - data = get_data(args, model_cfg) - assert len(data), "At least one train or eval dataset must be specified." - if args.trace: - assert "train" not in data, "Cannot train with traced model" - - exclude = ( - lambda n, p: p.ndim < 2 - or "bn" in n - or "ln" in n - or "bias" in n - or "logit_scale" in n - ) - include = lambda n, p: not exclude(n, p) - - named_parameters = list(model.named_parameters()) - - # freeze text encoder - text_freeze_parameters = [p for n, p in named_parameters if "text_branch" in n] - - if args.freeze_text: - print("Freeze Text!!!!") - for k in text_freeze_parameters: - k.requires_grad = False - - gain_or_bias_params = [ - p for n, p in named_parameters if exclude(n, p) and p.requires_grad - ] - rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad] - - # set wd-related params to 0 if use adam optimizer - if args.optimizer == "adam": - args.wd = 0 - args.wd_pretrained = 0 - args.wd_new = 0 - - if args.train_data is None: - optimizer = None - scheduler = None - else: - total_steps = data["train"].dataloader.num_batches * args.epochs - - if args.split_opt: - for x in ["lr", "beta1", "beta2", "eps", "wd"]: - for y in ["_new", "_pretrained"]: - if getattr(args, x + y) is None: - setattr(args, x + y, getattr(args, x)) - - gain_or_bias_pretrained_params = [ - p - for n, p in named_parameters - if (exclude(n, p) and p.requires_grad) and is_pretrained_params(n) - ] - rest_pretrained_params = [ - p - for n, p in named_parameters - if (include(n, p) and p.requires_grad) and is_pretrained_params(n) - ] - gain_or_bias_new_params = [ - p - for n, p in named_parameters - if (exclude(n, p) and p.requires_grad) and (not is_pretrained_params(n)) - ] - rest_new_params = [ - p - for n, p in named_parameters - if (include(n, p) and p.requires_grad) and (not is_pretrained_params(n)) - ] - pretrained_params_optimizer = get_optimizer( - [ - {"params": gain_or_bias_pretrained_params, "weight_decay": 0.0}, - { - "params": rest_pretrained_params, - "weight_decay": args.wd_pretrained, - }, 
- ], - lr=args.lr_pretrained, - betas=(args.beta1_pretrained, args.beta2_pretrained), - eps=args.eps_pretrained, - momentum=args.momentum_pretrained, - optimizer_name=args.optimizer, - ) - pretrained_params_scheduler = cosine_lr( - pretrained_params_optimizer, - args.lr_pretrained, - args.warmup, - total_steps, - ) - new_params_optimizer = get_optimizer( - [ - {"params": gain_or_bias_new_params, "weight_decay": 0.0}, - {"params": rest_new_params, "weight_decay": args.wd_new}, - ], - lr=args.lr_new, - betas=(args.beta1_new, args.beta2_new), - eps=args.eps_new, - momentum=args.momentum_new, - optimizer_name=args.optimizer, - ) - - new_params_scheduler = cosine_lr( - new_params_optimizer, args.lr_new, args.warmup, total_steps - ) - - optimizer = { - "pretrained": pretrained_params_optimizer, - "new": new_params_optimizer, - } - scheduler = { - "pretrained": pretrained_params_scheduler, - "new": new_params_scheduler, - } - - if args.horovod: - pretrained_params_optimizer = hvd.DistributedOptimizer( - pretrained_params_optimizer, - named_parameters=model.named_parameters(), - ) - new_params_optimizer = hvd.DistributedOptimizer( - new_params_optimizer, named_parameters=model.named_parameters() - ) - hvd.broadcast_parameters(model.state_dict(), root_rank=0) - hvd.broadcast_optimizer_state(pretrained_params_optimizer, root_rank=0) - hvd.broadcast_optimizer_state(new_params_optimizer, root_rank=0) - else: - optimizer = get_optimizer( - [ - {"params": gain_or_bias_params, "weight_decay": 0.0}, - {"params": rest_params, "weight_decay": args.wd}, - ], - lr=args.lr, - betas=(args.beta1, args.beta2), - eps=args.eps, - momentum=args.momentum, - optimizer_name=args.optimizer, - ) - - scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps) - - if args.horovod: - optimizer = hvd.DistributedOptimizer( - optimizer, named_parameters=model.named_parameters() - ) - hvd.broadcast_parameters(model.state_dict(), root_rank=0) - hvd.broadcast_optimizer_state(optimizer, root_rank=0) - - scaler = GradScaler() if args.precision == "amp" else None - - # optionally resume from a checkpoint - start_epoch = 0 - if args.resume is not None: - if os.path.isfile(args.resume): - checkpoint = torch.load(args.resume, map_location=device) - if "epoch" in checkpoint: - # resuming a train checkpoint w/ epoch and optimizer state - start_epoch = checkpoint["epoch"] - sd = checkpoint["state_dict"] - if not args.distributed and next(iter(sd.items()))[0].startswith( - "module" - ): - sd = {k[len("module.") :]: v for k, v in sd.items()} - model.load_state_dict(sd) - if args.split_opt: - if optimizer is not None: - for k, o_ in optimizer.items(): - o_.load_state_dict(checkpoint[k + "_" + "optimizer"]) - if optimizer is not None: - optimizer.load_state_dict(checkpoint["optimizer"]) - if scaler is not None and "scaler" in checkpoint: - scaler.load_state_dict(checkpoint["scaler"]) - logging.info( - f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})" - ) - else: - # loading a bare (model only) checkpoint for fine-tune or evaluation - model.load_state_dict(checkpoint) - logging.info( - f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})" - ) - if args.freeze_text: - print("Freeze Text!!!!") - for k in text_freeze_parameters: - k.requires_grad = False - else: - logging.info("=> no checkpoint found at '{}'".format(args.resume)) - - cudnn.benchmark = True - cudnn.deterministic = False - - # determine if this worker should save logs and checkpoints. 
only do so if it is rank == 0 - args.save_logs = args.logs and args.logs.lower() != "none" and is_master(args) - writer = None - if args.save_logs and args.tensorboard: - assert tensorboard is not None, "Please install tensorboard." - writer = tensorboard.SummaryWriter(args.tensorboard_path) - - if args.wandb and is_master(args): - assert wandb is not None, "Please install wandb." - logging.debug("Starting wandb.") - args.train_sz = data["train"].dataloader.num_samples - if args.val_data is not None: - args.val_sz = data["val"].dataloader.num_samples - # you will have to configure this for your project! - wandb.init( - project="clap", - notes=args.wandb_notes, - name=args.wandb_notes, - tags=[], - config=vars(args), - ) - if args.debug: - wandb.watch(model, log="all") - wandb.save(params_file) - logging.debug("Finished loading wandb.") - - if "train" not in data: - evaluate(model, data, start_epoch, args, writer) - return - elif start_epoch == 0 and "val" in data and not args.no_eval: - evaluate(model, data, 0, args, writer) - # print(f'rank {args.rank}, Start First Evaluation')# (yusong): for debug - if args.save_top_performance: - current_top_k_ckpt_metrics = { - i: 0 for i in range(args.save_top_performance) - } # initialize the top-k metric for ckpts to 0 - - # print(f'rank {args.rank}, Start Training') # (yusong): for debug - for epoch in range(start_epoch, args.epochs): - # freeze the text param after (include) args.freeze_text_after, this is -1 by default - if epoch == args.freeze_text_after: - print("Text pretrained parameters are freezed since this epoch.") - for k in text_freeze_parameters: - k.requires_grad = False - if is_master(args): - logging.info(f"Start epoch {epoch}") - - train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer) - completed_epoch = epoch + 1 - - if ( - any(v in data for v in ("val", "imagenet-val", "imagenet-v2")) - and not args.no_eval - ): - metrics = evaluate(model, data, completed_epoch, args, writer) - if args.save_top_performance: - top_k_dataset = args.top_k_checkpoint_select_dataset - top_k_metric = args.top_k_checkpoint_select_metric - filtered_metrics = [ - v - for k, v in metrics.items() - if top_k_metric in k and top_k_dataset in k - ] # check all R@10 metrics (all dataset) and use it to update the ckpt - # Saving checkpoints. - if args.save_logs: - if args.split_opt: - opt_dict = { - k + "_" + "optimizer": v.state_dict() for k, v in optimizer.items() - } - else: - opt_dict = {"optimizer": optimizer.state_dict()} - checkpoint_dict = { - "epoch": completed_epoch, - "name": args.name, - "state_dict": model.state_dict(), - } - checkpoint_dict.update(opt_dict) - if scaler is not None: - checkpoint_dict["scaler"] = scaler.state_dict() - - if completed_epoch == args.epochs or ( - args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0 - ): - torch.save( - checkpoint_dict, - os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"), - ) - if args.save_most_recent: - torch.save( - checkpoint_dict, - os.path.join(args.checkpoint_path, f"epoch_latest.pt"), - ) - if args.save_top_performance and not args.no_eval: - update_top_k_performance( - filtered_metrics, - current_top_k_ckpt_metrics, - args, - checkpoint_dict, - bignumbetter=True, - ) - - if args.wandb and is_master(args): - wandb.finish() - - -def copy_codebase(args): - from shutil import copytree, ignore_patterns - - new_code_path = os.path.join(args.logs, args.name, "code") - if os.path.exists(new_code_path): - print( - f"Error. 
Experiment already exists at {new_code_path}. Use --name to specify a new experiment." - ) - return -1 - print(f"Copying codebase to {new_code_path}") - current_code_path = os.path.realpath(__file__) - for _ in range(3): - current_code_path = os.path.dirname(current_code_path) - copytree( - current_code_path, new_code_path, ignore=ignore_patterns("log", "logs", "wandb") - ) - print("Done copying code.") - return 1 - - -if __name__ == "__main__": - main() diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/test_keypoints.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/test_keypoints.py deleted file mode 100644 index adc616e42341343e503afcbe181dbfae3f8ea063..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/test_keypoints.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import unittest -import torch - -from detectron2.structures.keypoints import Keypoints - - -class TestKeypoints(unittest.TestCase): - def test_cat_keypoints(self): - keypoints1 = Keypoints(torch.rand(2, 21, 3)) - keypoints2 = Keypoints(torch.rand(4, 21, 3)) - - cat_keypoints = keypoints1.cat([keypoints1, keypoints2]) - self.assertTrue(torch.all(cat_keypoints.tensor[:2] == keypoints1.tensor).item()) - self.assertTrue(torch.all(cat_keypoints.tensor[2:] == keypoints2.tensor).item()) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/AyushP/PolicyChatBot/README.md b/spaces/AyushP/PolicyChatBot/README.md deleted file mode 100644 index 9966f0d9dd5f7e1d50882e76d6195c66b33b290a..0000000000000000000000000000000000000000 --- a/spaces/AyushP/PolicyChatBot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: PolicyChatBot -emoji: 🏃 -colorFrom: gray -colorTo: gray -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Banbri/zcvzcv/src/app/engine/censorship.ts b/spaces/Banbri/zcvzcv/src/app/engine/censorship.ts deleted file mode 100644 index ae4cc0b98b1cc09b9dda0aed35767bb7faee3b6e..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/app/engine/censorship.ts +++ /dev/null @@ -1,184 +0,0 @@ - -// I don't want to be banned by Replicate because bad actors are asking -// for some naked anime stuff or whatever -// I also want to avoid a PR scandal due to some bad user generated content - -import { computeSecretFingerprint } from "@/lib/computeSecretFingerprint" - -// those keywords have been generated by looking at the logs of the panorama and the AI Comic Factory -// those are real requests some users tried to attempt.. 
:| - -const chickens = [ - "fcb4dacbd99b21368c50f29c1d47071c87cf2225ab9192282c785460391cd365", - "68840b60ac27eacaa7afe17e898d3c4a2dc71acff8c74d6782c1bcaafd14963d", - "67f745224fd6e1a7a3a244514d5807fcc994cbb62ca4ec8fa44cd14244a515ae", - "681fea565117808c6dbe002520d2cfeeb3e5c67e68630afb4a453449a9da587b", - "2f3d913b3db9e15a930aac43eb2d6fe8817db8e4bcf37794bf0227b06b718d1b", - "922a700b807e4994df82eba2b48a6ac131fe8d8d1035d06b3592d622fb232161", - "cb69ee6774eafcc720adb1f689d28acbb9f47998cbea0299ec66a58dedf91c37" -] - -const ducks = [ - "1c52cb20c0cbc76349fa63232b982bd394cf0850ebc17240dcf33c19fb15a26d", - "e1d4de9b8d464d7da07c276b63a42c1c9922224f0a6cab6b0826427ce4a7461a", - "0be3174bfb1a48a65875c2f035b1ae14fbc8f232f55785018de0cfe2132fa952", - "0f174769641b2e5d2c79b5a83e8ef91e004f6f3e62531cd70cfdff02159268cb", - "e9fb8ae8ff720acd91025229478a21e43e8e976e30119a76c293201adf572736", - "f65a0dc0e07b5d084ff24c69dcdb953f7b57101d2ebb716d4dfb5963076ef807", - "2bf38af1646489c2c086f811d082054cd29e23fa7bb5c525396bec01b3ab688e" -] - -const cats = [ - "fcffc3e997d952007d1b902a9cf40b750ba4a410ac65bfd95475996bf51359e4", - "3172a5fa159754d703489dfba5af520b8ace107cdf170f4c4cb38a6797aa163f", - "500012dbff4498a9c4513369d6b9b373fab9330ffd2cb1e622294043cc21b610", - "84e3a8d34ee7d0c8e7a2926dd1acad46a0b66b9d27725b3a7e5053550f490301" -] - -const roasted = [ - "a2bfbce0046c9a52a0eabf98f73e0f8e09959970431fc892ebdb4e1c97031b50", - "6eca1adf06851f99e9cdfbb496c27d46ff81106903d11f3346a146e96082b016", - "49a124c9ed6fbbad4105b3657dc25de369bcafb9d6787f610c08f584cd607d0f", - "c3afb59420c812cbc7c8f57ad3e8d79407f10106a99f829aa65316c99d0b29c4", - "2b808858836a5c205080f5b93201ef92e098cff931d8de6d9f20dc722997d077", - "07bef89d1a7d63c9c5ed64ba0f73d6cff689811847c2e20c8b3fbfb060e1d64e", - "baeb994922d5473f534aa54322d83effe74c6c4dac807e6b523a677d7acdc17b", - "ea4735a879edd5cc94ca7db26edd5a970df69a41f0009d3444486647e44175af", - "f2412249030454cd13ac6f7965871d924c16daacda0123de81892adb19ce49ac", - "9958c56e12bab8549cf752bcd8bec4ac36cf79c404b1faf5611f057bb71bc0e1", - "76cdade0b3d4caf0888f60318a5cbca00f830a3b0bf37735fc64fdaeb67c34d3", - "1bf53c97869e1ea89bda19da64a9173d48fe4ec823e949e2c898f8abb3fbf457", - "1bf53c97869e1ea89bda19da64a9173d48fe4ec823e949e2c898f8abb3fbf457", - "3d7f973fab8f4a19c0a3e59efe970ed7bd55a1cb795752d9cbe3c19e8a7d81ec" -] - -const banned = [ - "8a05d4869d9d6ce388c6cd2db13ca12b88097b90f9be027d5ffaaa467c7a6e5e", - "0c475212a608138244c5fc150b1563e5ef79c516234fd78dcd5993f726c359a0", - "df17388805f99f2ff3e5ae97a0f55e5c927eb47f17ca65822bf8c88f02bac3dd", - "86c3355d1bd581cdf7306729d8dd0ee9b7a317b9cfd6d7a6f5fad9c0dafe2167", - "23a2484cd420c9ffbfcc2c0075a9b330664450ced1fc64ab6a65e278086b8c6e", - "fb4cabe709b62eea1b4cc0030c76f5e4a43ee677ce19124e8e7bafa86c78ab66", - "d99c26daee85f7dc81c46c061a5874cff7179ed72d884d2316d664d36ffe7ab5", - "b93c38af5aa221d76c60ee3eb762efee0cdb0daf29ceb235b7dda6d46c06490d", - "8cf6c8765dc757319461dd9a785e77c201b8e5a604d36b817cd987c6a5e62500", - "f4a1cb290745717f86c3cee30fc324c0d80a9945fcbc7bbeb010579f58792f1e", - "7c87c47c42fc983119551342be9ddd5b32e530c0504ccdbbaa1e12b1d9f1bbcb", - "d04fad4f21d030da7a1301afbf480ef6246eb7bbf0f26e31865b2e015a25f747", - "d685ff22fb9da01ee949db212770729603989850864ef7a7085e1f086cfa7deb", - "533b90588d9ccf7967da54691f575e9fd4926c6e0b5fd94a47b932bcea270bee", - "9c2d61f28f5bb7f3f1dc9122be64cda8a428b46ce68b70120da4c41dba96ba4c", - "5d4b1a3eebe64dfa631d0e3b084bd96ee9364c3669269f838ca17a4900276264", - "d56f56413b9679fc0820a2c0237224ded8554c61fab8959c174123c8b68ba029", - 
"323a9ab60739726070d615ff3a05d7ff6bb6e3c4dd9ff16ce24f253ecd7b8851", - "975c6739de7d4999db15972f707f5f4e95649275f1c0c48e895b8c537e8638ec", - "67ee26eb9e1c1c7124797321b02bca90a19c18171782917cd4a487b722484dce", - "6df5aa7b72a4e6e3fb726489ff1437daa5752047507f4da912680b1d6647c7d6", - "b0864805364359e8c5810c233b1bf2c74dedce9055ae5f7680ba05b4e39db8e2", - "a8f841472ecffdd6266151148320c8e36847a24ead9d3338e0313b075c16649d", - "f9b127cd90e85b0ff68dd220361671663f0154b2b827f1f7ea797b020ca0018c", - "d5c20e9a1ecf01c82da24c514d867498b3e5f522adc1523ce29404a6563641d5", - "241022b49d7c0aba24a61eea1137a804f36e4bcb47af42950275baac9b4e7aac", - "fc99a70e17b6c86ef1b537654b0f50353567a7b59912c3ba955f3fca4d1ea696", - "255306e968009003d295cb2a7256f27bfcdb5d1743bf4d9f2aa4b8adf1a7734d", - "048c7b709763dd9c43794d241c369f0abcb079d546ddcbbba9968a1ed1da7ed7", - "520cbfeef3e4c405d79478eedccb97a4d476be585626dd2b1c53292797491bc7", - "f9f28a7ae7e8b1719b350a04dc087a4b8e33478d109ceeef6ba892b32d1105c9", - "d177f1bfe603647ef4c1c0e6f1a7172081fb9bbc2ea859705949f2c5aa5d4f22", - "302feef2c09247fbd23789581f7f5e2219f88ae0a937880954938573c2a52a84", - "99edd6f57b864873835f16f19c805dd94bed9da8967b84e3a62782f106d9ebcc", - "e75e5f01dcd8351c9553e89558085bd68e6feb295dee5d8da0c9b43ee303ce36", - "135e52a026aea9d2e12de358a85e05cf21121a18269269b7c62678c3bc846f5b", - "28e5b2d3eb5f1ef4cc7b570878b03acf303a6ca4ca95893591e0fb943b0beab0", - "a26b26340f8d0363633490556d20bcc250726d10e1431eb8c22d6b1ff3f2b14a", - "27e4ddde96ec6a1dbe1cf12d79448b3e72f144944c15b299629542d1b65fbabf", - "efd9c0a391ee93251046a58326d1b21b33fe21d71a3fb1855b9048ade53df77c", - "6d505fcce416c26a606878aab4d249a034ba2a9846cb1f883e0f9e3fb76ba6da", - "3a37b8a1b72f9bca51233536d50f9c8d33a787434684787871e0049c82347cda", - "16f9b451184a7c3148344c7d0315f5312ca20553d2271912ecaad91810d977e6", - "7406537eb74d1885bd05e191228de313b13702a64d90ae1736c6377b25ab579a", - "7e4d1395ae18980015cab16c85ffa20b4cb90a2db594126e893d0f7ac6eecaa8", - "ba813ee6c25698f0f68a07121d38bb47c9aa404c1ab0a6e767595cb75e1747b8", - "6586c93f3ece83e01ecc1eb84a7711e7975826a388d478a009468ea0ed9dc03e", - "8960174c74d86e03ae88fb6774580170e49952f2286d960be08c556bbd0dda95", - "4d611454369aa1a4e2b7eed1734fac5d480f08fb86b87a162967e416370f2a8e", - "59d48440f85eabf565fe8d3bc6b973ba64c70df3b36b0511e0e67ceca91762b3", - "cd926926e2af74e43d1a6a420a7e1933b78662320477a3c018b2711d8765e339", - "80e90057df6a59823f51aafac36ed5bc4e5ac26d675d9c1467501590c82f12d4", - "a9cf28b869b70e258adde5639a048f866ec86f8f3f3d53bfc960b86aa6da9239", - "cc2adbf8ac0cddeefa304d7b20f14a7e047a4b2299cc5e8f898f5c59660bd964", - "92a150a46146e9d3f84899cf15e12514af684e7ee18d7add782ddd4f4a15ef18", - "d9b2e84ef6dc0ce449357d52c9095f69b173a1b848ea2921199d33b0ec10024a", - "a9329a7e4d367a0135c1ca86c6ce5ecabcc26529235229d71b6bf991f7689e21", - "8f160c6fd8ccc3fb2a371a4b52748f0bd030766627c4322e2911fe82f6b10497", - "620e96eae4f3e88cbe0770292b33724c5df3866d83f39df6380441f7271c80e2", - "cafa3481fa3c45ed1e55cd0129c12b477eeab5aa3d6da20cae6d6292f19b0e6d", - "be07994e9a83aa3689e79b6e96123676ccc4fa29f523c28c750c6d60505531ee", - "f6498069768cd3aa79b2b0c91879694f05a259c8ee4a6bb343f0435f74eb1b53", - "c9b6b26cb3a694eb78fcac0a14ad18d46d50907186a9add41022d31d191b2b65" -] - -const young = [ - "ffdf66787b4a33b78b18c18822e334cfe2c8406caf442851deef451bd43140a1", - "858f22219afc4b32a7ba9a27a213d7f495e77c3cceed8147eae5282bf3e23d39", - "8c3c46df84ace3d58d4ce0fbc513017986b33c6002ae369d9f7dd1f892a898cb", - "66caa22b9483fdf026ce67de61067d81535a7c9b3169cbc5c2a455ac8dcc7bec", - 
"76893047b1eff9fadc7be07b13adb5aaed9c73bcdeea46ee07098605e2c7ff76", - "526cb848754e2baaa17376a5693d90ba3f69f71fd2a866f22876ac8a075849a7", - "f59c38e31d0f64dc1bfcdf34451723bc1a65570e209e5496c8d1d7f6d3d649db", - "e013a67e275c62c1402ccbbb11ad14afb8b8a82318a44c07d67599ed5ac874de", - "3bef34219fb07f867ecbff4d6748f598d6cc0761e17dd0d431ee1f4ec3281374", - "8211bf5f613fac06cd5d074d34c16dfacc9367c8afaa6ad3aff99d145e5221be" -] - -const getFingerprint = (word: string) => { - return computeSecretFingerprint( - word.toLocaleLowerCase().replaceAll(/[^a-zA-Z0-9]/gi, "") - ) -} - -const encode = (list: string[]) => { - console.log(JSON.stringify( - list.sort((a, b) => (b.length - a.length)) - .map(item => getFingerprint(item)), null, 2)) -} - -// encode([ "badword" ]) - -export const filterOutBadWords = (sentence: string) => { - if (process.env.ENABLE_CENSORSHIP !== "true") { return sentence } - - let requireCensorship = false - - const words = sentence.replaceAll(/[^a-zA-Z0-9]/gi, " ").replaceAll(/\s+/gi, " ").trim().split(" ") - - const sanitized = words.map(word => { - const fingerprint = getFingerprint(word) - - let result: string = word - // some users want to play it smart and bypass our system so let's play too - if (chickens.includes(fingerprint)) { - result = "large chicken" - } else if (ducks.includes(fingerprint)) { - result = "big duck" - } else if (cats.includes(fingerprint)) { - result = "cat" - } else if (roasted.includes(fingerprint)) { - result = "roasted chicken" - } else if (young.includes(fingerprint)) { - result = "adult" - } else if (banned.includes(fingerprint)) { - result = "_BANNED_" - } - - if (result !== word) { - requireCensorship = true - } - return result - }).filter(item => item !== "_BANNED_").join(" ") - - // if the user didn't try to use a bad word, we leave it untouched - // he words array has been degraded by the replace operation, but it removes commas etc which isn't great - // so if the request was genuine and SFW, it's best to return the original prompt - return requireCensorship ? sanitized : sentence -} \ No newline at end of file diff --git a/spaces/Bart92/RVC_HF/demucs/__init__.py b/spaces/Bart92/RVC_HF/demucs/__init__.py deleted file mode 100644 index d4182e356427e1b05a79f8da641c70bb732514fa..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/demucs/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -__version__ = "2.0.3" diff --git a/spaces/Benson/text-generation/Examples/9anime Mod Apk Download.md b/spaces/Benson/text-generation/Examples/9anime Mod Apk Download.md deleted file mode 100644 index b253397916fb2cf090838b4cd9505385db155534..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/9anime Mod Apk Download.md +++ /dev/null @@ -1,74 +0,0 @@ - -

9anime Mod APK Download: Watch Anime Online for Free

Anime is a popular form of animation that originated in Japan and has a fan base all over the world. Anime fans love to watch their favorite shows and movies online, but finding a reliable and legal streaming platform can be a challenge. That is why many anime lovers turn to 9anime, a free video streaming website that offers a large collection of anime content in various genres, languages, and resolutions. But what if you want to enjoy 9anime without ads, pop-ups, or restrictions? That is where the 9anime mod apk comes in handy. In this article, we will tell you everything you need to know about the 9anime mod apk, how to download and install it, and what the best alternatives to 9anime are.

9anime mod apk download

Download: https://bltlly.com/2v6Mzb

What is 9anime?

9anime is a free video streaming website that lets you watch anime online without paying or registering. You can find thousands of anime titles on 9anime, from classics to popular and currently airing series. You can also choose from different genres, such as action, comedy, romance, horror, fantasy, and more. Whether you are looking for Japanese, Chinese, or Korean animation, you can find it on 9anime.

Features of 9anime

Some of the features that make 9anime a great anime streaming platform are:

- It has a user-friendly interface that lets you easily search for and browse your favorite anime.
- It offers multiple video quality options, from 360p to 1080p, depending on your Internet speed and device.
- It provides both subbed and dubbed versions of anime, so you can watch them in your preferred language.
- It lets you download anime videos to your device for offline viewing.
- It has a schedule feature that shows you the upcoming episodes and release dates of your favorite anime series.

Pros and cons of 9anime

Like any other online tool, 9anime has its advantages and disadvantages. Here are some of them:

| Pros | Cons |
| --- | --- |
| It has a large and diverse library of anime content. | It operates in a legal gray area and may be blocked by some ISPs or regions. |
| It is free to use and does not require registration or a subscription. | It shows ads and pop-ups that can be annoying or harmful. |
| It updates its content regularly with the latest episodes and movies. | It may have some broken links or unavailable videos due to copyright issues. |
| It supports multiple devices and platforms. | It may have some bugs or glitches that affect its performance. |

What is the 9anime mod apk?

The 9anime mod apk is a modified version of the official 9anime app that gives you access to all the features and benefits of 9anime without limitations or drawbacks. With the 9anime mod apk, you can watch anime online for free without ads, pop-ups, or interruptions. You can also enjoy faster loading speeds, better video quality, and more options for downloading and streaming anime content. In short, the 9anime mod apk is the ultimate anime app for anime fans.

How to download and install the 9anime mod apk

If you want to download and install the 9anime mod apk on your Android device, you need to follow these steps:

1. Go to [this link]( 1 ) and download the latest version of the 9anime mod apk file to your device.
2. Go to your device settings and allow the installation of apps from unknown sources.
3. Locate the downloaded 9anime mod apk file and tap on it to start the installation process.
4. Follow the on-screen instructions and wait for the installation to complete.
5. Launch the 9anime mod apk app and enjoy watching anime online for free.

Benefits of using the 9anime mod apk

Some of the benefits of using the 9anime mod apk are:

- You can watch anime online for free without ads, pop-ups, or interruptions.
- You can download anime videos to your device for offline viewing.
- You can choose from different video quality options, from 360p to 1080p.
- You can watch subbed and dubbed versions of anime in your preferred language.
- You can access a huge and diverse library of anime content in various genres and categories.

The best alternatives to 9anime

If you are looking for other anime streaming platforms that are similar to 9anime, you can check out these alternatives:

KissAnime

KissAnime is one of the most popular and well-known anime streaming websites, offering a wide range of anime content in high quality. You can watch anime online for free on KissAnime, or download it to your device for offline viewing. You can also find different versions of anime on KissAnime, as well as a community forum where you can interact with other anime fans.

Crunchyroll

Crunchyroll is a legal, licensed anime streaming platform that provides access to thousands of anime titles, as well as manga, dramas, and games. You can watch anime online for free on Crunchyroll, or upgrade to a premium membership to get more features and benefits. You can also enjoy simulcasts of the latest anime episodes, as well as original content exclusive to Crunchyroll.

AnimeSuge

AnimeSuge is a free video streaming website that lets you watch anime online without ads or registration. You can find a variety of anime genres and categories on AnimeSuge, from action to romance, comedy, horror, and more. You can also watch subbed and dubbed versions of anime on AnimeSuge, as well as request any anime you want to watch.

Anime-Planet

AnimeFreak

AnimeFreak is a free video streaming website that lets you watch anime online without any hassle. You can browse through a large and regularly updated collection of anime content on AnimeFreak, from the latest releases to the classics. You can also watch subbed and dubbed versions of anime on AnimeFreak, as well as enjoy fast loading speeds and smooth streaming quality.
Conclusion

-

9anime is a great option for watching anime online for free, but it has some drawbacks that can affect your viewing experience. That is why you may want to try 9anime mod apk, a modified version of the official 9anime app that gives you all of 9anime's features and benefits without limitations or drawbacks. With 9anime mod apk, you can watch anime online for free without ads, pop-ups, or interruptions. You can also download anime videos to your device for offline viewing, choose between different video quality options, watch subbed and dubbed versions of anime, and access a huge and diverse library of anime content. However, if you are looking for other alternatives to 9anime, you can try KissAnime, Crunchyroll, AnimeSuge, Anime-Planet, or AnimeFreak. These are some of the best anime streaming platforms offering services similar to or better than 9anime. We hope this article helped you learn more about the 9anime mod apk download and how to watch anime online for free.

-

Frequently asked questions

-

Here are some frequently asked questions about the 9anime mod apk download:

-

Is 9anime mod apk safe to use?

-

Yes, 9anime mod apk is safe to use as long as you download it from a trusted source. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before using them.

Is 9anime mod apk legal to use?

Does 9anime mod apk work on iOS devices?

-

No, 9anime mod apk is only compatible with Android devices. If you want to watch anime online for free on your iOS device, you will need to use the official 9anime website or app, or one of the alternatives mentioned above.

-

How can I update 9anime mod apk?

-

To update 9anime mod apk, you will need to download and install the latest version of the mod apk file from the same source you originally downloaded it from. You may also need to uninstall the previous version of the app before installing the new one.

-

Can I request any anime on 9anime mod apk?

-

Yes, you can request any anime you want to watch on 9anime mod apk by using the request feature in the app. However, there is no guarantee that your request will be fulfilled, as it depends on the availability and legality of the anime content.

-

Can I watch anime offline on 9anime mod apk?

-

Yes, you can watch anime offline on 9anime mod apk by downloading anime videos to your device with the download feature in the app. However, you will need enough storage space, and an Internet connection to download the videos in the first place.

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Apk M.facebook.com.md b/spaces/Benson/text-generation/Examples/Apk M.facebook.com.md deleted file mode 100644 index 67bd53ea8324af126946aa13b20755f610db25f8..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Apk M.facebook.com.md +++ /dev/null @@ -1,94 +0,0 @@ - -

m.facebook.com apk: What it is and how to download it

-

Facebook is one of the most popular social media platforms in the world, with billions of users connecting, sharing, and interacting with each other every day. However, not everyone has a powerful smartphone or a stable Internet connection to enjoy all the features of the regular Facebook app. That is why there is an alternative version of Facebook designed for low-end devices and slow networks: the m.facebook.com apk. In this article, we will explain what the m.facebook.com apk is, why you should use it, and how to download and install it on your Android device.

-

Introduction

-

Facebook is a great way to stay in touch with your friends and family, follow your favorite celebrities and brands, watch live videos, play games, and more. But sometimes the regular Facebook app can be too heavy and slow for your device or your network. It can take up a lot of storage space, consume a lot of data and battery, and load slowly or crash frequently. If you are facing these problems, you may want to try the m.facebook.com apk instead.

-

apk m.facebook.com


Download Zip ✦✦✦ https://bltlly.com/2v6J39



-

What is m.facebook.com apk?

-

m.facebook.com apk is a lighter, faster version of Facebook that uses less data and works in all network conditions. It is also known as Facebook Lite or FB Lite. It is an official app developed by Facebook that aims to provide a better experience for users with low-end devices or poor Internet connections. It has all the basic Facebook features, such as posting status updates, sharing photos and videos, liking and commenting on posts, finding events, playing games, and so on. It also supports some advanced features, such as live streaming, stories, groups, pages, and more.

-

Why use m.facebook.com apk?

-

There are many reasons why you might want to use m.facebook.com apk instead of the regular Facebook app. Here are some of them:

-
- It works on old Android phones - it can be used on older Android phones that are not compatible with the regular Facebook app.
- It uses less data - it compresses images and videos to reduce data usage. You can also turn on data saver mode to save even more data.
- It loads quickly - it is optimized for speed and performance. It loads pages faster and shows updates from friends more efficiently.
- It works on all networks - it is designed for 2G networks and areas with slow or unstable Internet connections. You can access Facebook even when the signal is weak or the network is congested.

How to download and install m.facebook.com apk

-

Downloading and installing m.facebook.com apk is very easy. You can follow these steps (an optional command-line sketch follows the list):

-
1. Go to [m.facebook.com]( 1 ) in your browser.
2. Tap the "Download" button at the top of the page.
3. You will be redirected to the Google Play Store page for Facebook Lite. Tap "Install" to start downloading the app.
4. Once the app is downloaded, open it and log in with your Facebook account.
5. Enjoy using m.facebook.com apk on your device.
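For users who prefer to drive the sideload from a computer, here is a minimal sketch using Python's subprocess module to call adb. It assumes the Android platform tools (adb) are installed and on PATH, USB debugging is enabled on the phone, and the APK has already been downloaded locally; the file name below is a placeholder.

```python
# Minimal sketch: sideload a downloaded APK with adb and confirm it installed.
import subprocess

APK_PATH = "facebook_lite.apk"  # placeholder path to the downloaded file


def sideload(apk_path: str) -> None:
    # `adb install -r` installs the APK, replacing any existing version.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)


def list_installed_packages() -> str:
    # `adb shell pm list packages` prints one "package:<name>" line per installed app.
    result = subprocess.run(
        ["adb", "shell", "pm", "list", "packages"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout


if __name__ == "__main__":
    sideload(APK_PATH)
    packages = list_installed_packages()
    print("lite client found" if "facebook.lite" in packages else "package not listed")
```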

Features of m.facebook.com apk

-

m.facebook.com apk has many features that make it a great alternative to the regular Facebook app. Here are some of them:

-

Fast and lightweight

-

m.facebook.com apk is fast and lightweight, which means it runs smoothly and efficiently on your device. It does not consume much memory or CPU, so it does not slow down your device or drain your battery. It also does not crash or freeze often, unlike the regular Facebook app, which can have bugs or glitches.

-

-

Works on old and low-end devices

- -

Uses less data and battery

-

m.facebook.com apk uses less data and battery than the regular Facebook app. It compresses images and videos before sending or receiving them, which reduces the amount of data transferred over the network. It also lets you turn on data saver mode, which further limits data usage by disabling some features or loading lower-quality content. You can save up to 90% of your data usage by using m.facebook.com apk. Moreover, m.facebook.com apk uses less power than the regular Facebook app, which means it does not drain your battery as quickly. You can use m.facebook.com apk for longer periods without worrying about running out of battery.

-

Supports all Facebook features

-

m.facebook.com apk supports all the Facebook features you need to stay connected and entertained. You can do everything you can in the regular Facebook app, such as:

-
- Post status updates, photos, videos, and stories
- Like, comment on, and share posts from your friends and the pages you follow
- Chat with your friends and family using Messenger Lite
- Watch live videos and stories from your friends and the pages you follow
- Find events near you and invite your friends to join
- Play games with your friends using Instant Games
- Create groups and pages to connect with people who share your interests
- Discover new people and pages to follow using Explore
- Manage your profile and settings using Menu

Pros and cons of m.facebook.com apk

-

m.facebook.com apk has many advantages over the regular Facebook app, but it also has some drawbacks you should be aware of. Here are some of the pros and cons of m.facebook.com apk:

-

Pros

-

Save storage space and data usage

- -

Access Facebook even in poor network conditions

-

m.facebook.com apk lets you access Facebook even in poor network conditions by optimizing for speed and performance. It works well on 2G networks and in areas with slow or unstable Internet connections. It loads pages faster and shows updates from friends more efficiently. You can access Facebook even when the signal is weak or the network is congested.

-

Enjoy a simple and easy-to-use interface

-

m.facebook.com apk has a simple, user-friendly interface that makes it easy to use and navigate. It has a clean, minimalist design that focuses on the essential Facebook features. It does not have any unnecessary or distracting elements that could clutter the screen or confuse the user. It also has a dark mode option that reduces eye strain and saves battery life.

-

Cons

-

May have some compatibility issues with some devices

-

m.facebook.com apk may have some compatibility issues with certain devices that can affect its functionality or performance. Some users have reported problems such as crashing, freezing, lagging, or failing to load properly on some devices. These problems can be caused by various factors, such as the device model, the operating system version, the network settings, and so on. If you run into any of these issues, you can try updating the app, clearing the cache, restarting your device, or contacting the developer for support.

-

May not support some features or updates of the regular Facebook app

- -

May have lower quality images and videos

-

m.facebook.com apk may have lower quality images and videos than the regular Facebook app, since it compresses them to save data and speed up loading. This can result in blurry, pixelated, or distorted images and videos that may not look as good as the originals. If you want to see high-quality images and videos on Facebook, you may want to use the regular Facebook app instead.

-

Conclusion

-

m.facebook.com apk is a great alternative to the regular Facebook app for users with low-end devices or poor Internet connections. It is fast and lightweight, and it uses less data and battery than the regular Facebook app. It also works on old and low-end devices and supports all Facebook features. However, it also has some drawbacks, such as compatibility issues, missing features or updates, and lower quality images and videos. You should weigh the pros and cons of m.facebook.com apk before deciding whether or not to use it.

-

Frequently asked questions

-

Here are some frequently asked questions about m.facebook.com apk:

-
1. Is m.facebook.com apk safe to use?

   Yes, m.facebook.com apk is safe to use, since it is an official app developed by Facebook. It does not contain any malware or viruses that could harm your device or your privacy. However, you should always download it from a trusted source, such as [m.facebook.com] or the Google Play Store.

2. Is m.facebook.com apk free to use?

   Yes, m.facebook.com apk is free to use, just like the regular Facebook app. You do not need to pay any fees or charges to download or use it. However, you may incur data charges from your network provider if you use it without a Wi-Fi connection.

3. Can I use m.facebook.com apk and the regular Facebook app at the same time?

4. How can I update m.facebook.com apk?

   You can update m.facebook.com apk by following these steps:

   - Go to [m.facebook.com] in your browser.
   - Tap the "Download" button at the top of the page.
   - You will be redirected to the Google Play Store page for Facebook Lite. Tap "Update" to start updating the app.
   - Once the app is updated, open it and enjoy the latest version.

5. How can I delete m.facebook.com apk?

   You can delete m.facebook.com apk by following these steps (a command-line alternative is sketched after this list):

   - Go to your device settings and tap "Apps".
   - Find and tap "Facebook Lite".
   - Tap "Uninstall" and confirm your action.
   - The app will be removed from your device.
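For reference, the uninstall can also be done from a computer with adb. The sketch below assumes adb is on PATH and USB debugging is enabled; the package name com.facebook.lite is the commonly documented identifier for Facebook Lite and is treated here as an assumption, not something taken from this article.

```python
# Minimal sketch: remove Facebook Lite from a connected device with adb.
import subprocess

PACKAGE = "com.facebook.lite"  # assumed package name for Facebook Lite


def uninstall(package: str) -> bool:
    # `adb uninstall <package>` prints "Success" when the app was removed.
    result = subprocess.run(
        ["adb", "uninstall", package],
        capture_output=True, text=True,
    )
    return "Success" in result.stdout


if __name__ == "__main__":
    print("removed" if uninstall(PACKAGE) else "not removed (is the package installed?)")
```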

-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/__init__.py deleted file mode 100644 index d54bc63eba364bda3f869a0f3b1863b872f9682a..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/__init__.py +++ /dev/null @@ -1,467 +0,0 @@ -import functools -import logging -import os -import pathlib -import sys -import sysconfig -from typing import Any, Dict, Generator, Optional, Tuple - -from pip._internal.models.scheme import SCHEME_KEYS, Scheme -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.deprecation import deprecated -from pip._internal.utils.virtualenv import running_under_virtualenv - -from . import _sysconfig -from .base import ( - USER_CACHE_DIR, - get_major_minor_version, - get_src_prefix, - is_osx_framework, - site_packages, - user_site, -) - -__all__ = [ - "USER_CACHE_DIR", - "get_bin_prefix", - "get_bin_user", - "get_major_minor_version", - "get_platlib", - "get_purelib", - "get_scheme", - "get_src_prefix", - "site_packages", - "user_site", -] - - -logger = logging.getLogger(__name__) - - -_PLATLIBDIR: str = getattr(sys, "platlibdir", "lib") - -_USE_SYSCONFIG_DEFAULT = sys.version_info >= (3, 10) - - -def _should_use_sysconfig() -> bool: - """This function determines the value of _USE_SYSCONFIG. - - By default, pip uses sysconfig on Python 3.10+. - But Python distributors can override this decision by setting: - sysconfig._PIP_USE_SYSCONFIG = True / False - Rationale in https://github.com/pypa/pip/issues/10647 - - This is a function for testability, but should be constant during any one - run. - """ - return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT)) - - -_USE_SYSCONFIG = _should_use_sysconfig() - -if not _USE_SYSCONFIG: - # Import distutils lazily to avoid deprecation warnings, - # but import it soon enough that it is in memory and available during - # a pip reinstall. - from . import _distutils - -# Be noisy about incompatibilities if this platforms "should" be using -# sysconfig, but is explicitly opting out and using distutils instead. -if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG: - _MISMATCH_LEVEL = logging.WARNING -else: - _MISMATCH_LEVEL = logging.DEBUG - - -def _looks_like_bpo_44860() -> bool: - """The resolution to bpo-44860 will change this incorrect platlib. - - See . - """ - from distutils.command.install import INSTALL_SCHEMES - - try: - unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"] - except KeyError: - return False - return unix_user_platlib == "$usersite" - - -def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool: - platlib = scheme["platlib"] - if "/$platlibdir/" in platlib: - platlib = platlib.replace("/$platlibdir/", f"/{_PLATLIBDIR}/") - if "/lib64/" not in platlib: - return False - unpatched = platlib.replace("/lib64/", "/lib/") - return unpatched.replace("$platbase/", "$base/") == scheme["purelib"] - - -@functools.lru_cache(maxsize=None) -def _looks_like_red_hat_lib() -> bool: - """Red Hat patches platlib in unix_prefix and unix_home, but not purelib. - - This is the only way I can see to tell a Red Hat-patched Python. 
- """ - from distutils.command.install import INSTALL_SCHEMES - - return all( - k in INSTALL_SCHEMES - and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k]) - for k in ("unix_prefix", "unix_home") - ) - - -@functools.lru_cache(maxsize=None) -def _looks_like_debian_scheme() -> bool: - """Debian adds two additional schemes.""" - from distutils.command.install import INSTALL_SCHEMES - - return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES - - -@functools.lru_cache(maxsize=None) -def _looks_like_red_hat_scheme() -> bool: - """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``. - - Red Hat's ``00251-change-user-install-location.patch`` changes the install - command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is - (fortunately?) done quite unconditionally, so we create a default command - object without any configuration to detect this. - """ - from distutils.command.install import install - from distutils.dist import Distribution - - cmd: Any = install(Distribution()) - cmd.finalize_options() - return ( - cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local" - and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local" - ) - - -@functools.lru_cache(maxsize=None) -def _looks_like_slackware_scheme() -> bool: - """Slackware patches sysconfig but fails to patch distutils and site. - - Slackware changes sysconfig's user scheme to use ``"lib64"`` for the lib - path, but does not do the same to the site module. - """ - if user_site is None: # User-site not available. - return False - try: - paths = sysconfig.get_paths(scheme="posix_user", expand=False) - except KeyError: # User-site not available. - return False - return "/lib64/" in paths["purelib"] and "/lib64/" not in user_site - - -@functools.lru_cache(maxsize=None) -def _looks_like_msys2_mingw_scheme() -> bool: - """MSYS2 patches distutils and sysconfig to use a UNIX-like scheme. - - However, MSYS2 incorrectly patches sysconfig ``nt`` scheme. The fix is - likely going to be included in their 3.10 release, so we ignore the warning. - See msys2/MINGW-packages#9319. - - MSYS2 MINGW's patch uses lowercase ``"lib"`` instead of the usual uppercase, - and is missing the final ``"site-packages"``. - """ - paths = sysconfig.get_paths("nt", expand=False) - return all( - "Lib" not in p and "lib" in p and not p.endswith("site-packages") - for p in (paths[key] for key in ("platlib", "purelib")) - ) - - -def _fix_abiflags(parts: Tuple[str]) -> Generator[str, None, None]: - ldversion = sysconfig.get_config_var("LDVERSION") - abiflags = getattr(sys, "abiflags", None) - - # LDVERSION does not end with sys.abiflags. Just return the path unchanged. - if not ldversion or not abiflags or not ldversion.endswith(abiflags): - yield from parts - return - - # Strip sys.abiflags from LDVERSION-based path components. - for part in parts: - if part.endswith(ldversion): - part = part[: (0 - len(abiflags))] - yield part - - -@functools.lru_cache(maxsize=None) -def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None: - issue_url = "https://github.com/pypa/pip/issues/10151" - message = ( - "Value for %s does not match. 
Please report this to <%s>" - "\ndistutils: %s" - "\nsysconfig: %s" - ) - logger.log(_MISMATCH_LEVEL, message, key, issue_url, old, new) - - -def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool: - if old == new: - return False - _warn_mismatched(old, new, key=key) - return True - - -@functools.lru_cache(maxsize=None) -def _log_context( - *, - user: bool = False, - home: Optional[str] = None, - root: Optional[str] = None, - prefix: Optional[str] = None, -) -> None: - parts = [ - "Additional context:", - "user = %r", - "home = %r", - "root = %r", - "prefix = %r", - ] - - logger.log(_MISMATCH_LEVEL, "\n".join(parts), user, home, root, prefix) - - -def get_scheme( - dist_name: str, - user: bool = False, - home: Optional[str] = None, - root: Optional[str] = None, - isolated: bool = False, - prefix: Optional[str] = None, -) -> Scheme: - new = _sysconfig.get_scheme( - dist_name, - user=user, - home=home, - root=root, - isolated=isolated, - prefix=prefix, - ) - if _USE_SYSCONFIG: - return new - - old = _distutils.get_scheme( - dist_name, - user=user, - home=home, - root=root, - isolated=isolated, - prefix=prefix, - ) - - warning_contexts = [] - for k in SCHEME_KEYS: - old_v = pathlib.Path(getattr(old, k)) - new_v = pathlib.Path(getattr(new, k)) - - if old_v == new_v: - continue - - # distutils incorrectly put PyPy packages under ``site-packages/python`` - # in the ``posix_home`` scheme, but PyPy devs said they expect the - # directory name to be ``pypy`` instead. So we treat this as a bug fix - # and not warn about it. See bpo-43307 and python/cpython#24628. - skip_pypy_special_case = ( - sys.implementation.name == "pypy" - and home is not None - and k in ("platlib", "purelib") - and old_v.parent == new_v.parent - and old_v.name.startswith("python") - and new_v.name.startswith("pypy") - ) - if skip_pypy_special_case: - continue - - # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in - # the ``include`` value, but distutils's ``headers`` does. We'll let - # CPython decide whether this is a bug or feature. See bpo-43948. - skip_osx_framework_user_special_case = ( - user - and is_osx_framework() - and k == "headers" - and old_v.parent.parent == new_v.parent - and old_v.parent.name.startswith("python") - ) - if skip_osx_framework_user_special_case: - continue - - # On Red Hat and derived Linux distributions, distutils is patched to - # use "lib64" instead of "lib" for platlib. - if k == "platlib" and _looks_like_red_hat_lib(): - continue - - # On Python 3.9+, sysconfig's posix_user scheme sets platlib against - # sys.platlibdir, but distutils's unix_user incorrectly coninutes - # using the same $usersite for both platlib and purelib. This creates a - # mismatch when sys.platlibdir is not "lib". - skip_bpo_44860 = ( - user - and k == "platlib" - and not WINDOWS - and sys.version_info >= (3, 9) - and _PLATLIBDIR != "lib" - and _looks_like_bpo_44860() - ) - if skip_bpo_44860: - continue - - # Slackware incorrectly patches posix_user to use lib64 instead of lib, - # but not usersite to match the location. - skip_slackware_user_scheme = ( - user - and k in ("platlib", "purelib") - and not WINDOWS - and _looks_like_slackware_scheme() - ) - if skip_slackware_user_scheme: - continue - - # Both Debian and Red Hat patch Python to place the system site under - # /usr/local instead of /usr. Debian also places lib in dist-packages - # instead of site-packages, but the /usr/local check should cover it. 
- skip_linux_system_special_case = ( - not (user or home or prefix or running_under_virtualenv()) - and old_v.parts[1:3] == ("usr", "local") - and len(new_v.parts) > 1 - and new_v.parts[1] == "usr" - and (len(new_v.parts) < 3 or new_v.parts[2] != "local") - and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme()) - ) - if skip_linux_system_special_case: - continue - - # On Python 3.7 and earlier, sysconfig does not include sys.abiflags in - # the "pythonX.Y" part of the path, but distutils does. - skip_sysconfig_abiflag_bug = ( - sys.version_info < (3, 8) - and not WINDOWS - and k in ("headers", "platlib", "purelib") - and tuple(_fix_abiflags(old_v.parts)) == new_v.parts - ) - if skip_sysconfig_abiflag_bug: - continue - - # MSYS2 MINGW's sysconfig patch does not include the "site-packages" - # part of the path. This is incorrect and will be fixed in MSYS. - skip_msys2_mingw_bug = ( - WINDOWS and k in ("platlib", "purelib") and _looks_like_msys2_mingw_scheme() - ) - if skip_msys2_mingw_bug: - continue - - # CPython's POSIX install script invokes pip (via ensurepip) against the - # interpreter located in the source tree, not the install site. This - # triggers special logic in sysconfig that's not present in distutils. - # https://github.com/python/cpython/blob/8c21941ddaf/Lib/sysconfig.py#L178-L194 - skip_cpython_build = ( - sysconfig.is_python_build(check_home=True) - and not WINDOWS - and k in ("headers", "include", "platinclude") - ) - if skip_cpython_build: - continue - - warning_contexts.append((old_v, new_v, f"scheme.{k}")) - - if not warning_contexts: - return old - - # Check if this path mismatch is caused by distutils config files. Those - # files will no longer work once we switch to sysconfig, so this raises a - # deprecation message for them. - default_old = _distutils.distutils_scheme( - dist_name, - user, - home, - root, - isolated, - prefix, - ignore_config_files=True, - ) - if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS): - deprecated( - reason=( - "Configuring installation scheme with distutils config files " - "is deprecated and will no longer work in the near future. If you " - "are using a Homebrew or Linuxbrew Python, please see discussion " - "at https://github.com/Homebrew/homebrew-core/issues/76621" - ), - replacement=None, - gone_in=None, - ) - return old - - # Post warnings about this mismatch so user can report them back. - for old_v, new_v, key in warning_contexts: - _warn_mismatched(old_v, new_v, key=key) - _log_context(user=user, home=home, root=root, prefix=prefix) - - return old - - -def get_bin_prefix() -> str: - new = _sysconfig.get_bin_prefix() - if _USE_SYSCONFIG: - return new - - old = _distutils.get_bin_prefix() - if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"): - _log_context() - return old - - -def get_bin_user() -> str: - return _sysconfig.get_scheme("", user=True).scripts - - -def _looks_like_deb_system_dist_packages(value: str) -> bool: - """Check if the value is Debian's APT-controlled dist-packages. - - Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns the - default package path controlled by APT, but does not patch ``sysconfig`` to - do the same. This is similar to the bug worked around in ``get_scheme()``, - but here the default is ``deb_system`` instead of ``unix_local``. Ultimately - we can't do anything about this Debian bug, and this detection allows us to - skip the warning when needed. 
- """ - if not _looks_like_debian_scheme(): - return False - if value == "/usr/lib/python3/dist-packages": - return True - return False - - -def get_purelib() -> str: - """Return the default pure-Python lib location.""" - new = _sysconfig.get_purelib() - if _USE_SYSCONFIG: - return new - - old = _distutils.get_purelib() - if _looks_like_deb_system_dist_packages(old): - return old - if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"): - _log_context() - return old - - -def get_platlib() -> str: - """Return the default platform-shared lib location.""" - new = _sysconfig.get_platlib() - if _USE_SYSCONFIG: - return new - - from . import _distutils - - old = _distutils.get_platlib() - if _looks_like_deb_system_dist_packages(old): - return old - if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"): - _log_context() - return old diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/macromanprober.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/macromanprober.py deleted file mode 100644 index 1425d10ecaa59a9e49b73cea2b8b4747de73f6b5..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/macromanprober.py +++ /dev/null @@ -1,162 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# This code was modified from latin1prober.py by Rob Speer . -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Rob Speer - adapt to MacRoman encoding -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from typing import List, Union - -from .charsetprober import CharSetProber -from .enums import ProbingState - -FREQ_CAT_NUM = 4 - -UDF = 0 # undefined -OTH = 1 # other -ASC = 2 # ascii capital letter -ASS = 3 # ascii small letter -ACV = 4 # accent capital vowel -ACO = 5 # accent capital other -ASV = 6 # accent small vowel -ASO = 7 # accent small other -ODD = 8 # character that is unlikely to appear -CLASS_NUM = 9 # total classes - -# The change from Latin1 is that we explicitly look for extended characters -# that are infrequently-occurring symbols, and consider them to always be -# improbable. This should let MacRoman get out of the way of more likely -# encodings in most situations. 
- -# fmt: off -MacRoman_CharToClass = ( - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F - OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47 - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57 - ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F - OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67 - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77 - ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F - ACV, ACV, ACO, ACV, ACO, ACV, ACV, ASV, # 80 - 87 - ASV, ASV, ASV, ASV, ASV, ASO, ASV, ASV, # 88 - 8F - ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASV, # 90 - 97 - ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # 98 - 9F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, ASO, # A0 - A7 - OTH, OTH, ODD, ODD, OTH, OTH, ACV, ACV, # A8 - AF - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7 - OTH, OTH, OTH, OTH, OTH, OTH, ASV, ASV, # B8 - BF - OTH, OTH, ODD, OTH, ODD, OTH, OTH, OTH, # C0 - C7 - OTH, OTH, OTH, ACV, ACV, ACV, ACV, ASV, # C8 - CF - OTH, OTH, OTH, OTH, OTH, OTH, OTH, ODD, # D0 - D7 - ASV, ACV, ODD, OTH, OTH, OTH, OTH, OTH, # D8 - DF - OTH, OTH, OTH, OTH, OTH, ACV, ACV, ACV, # E0 - E7 - ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # E8 - EF - ODD, ACV, ACV, ACV, ACV, ASV, ODD, ODD, # F0 - F7 - ODD, ODD, ODD, ODD, ODD, ODD, ODD, ODD, # F8 - FF -) - -# 0 : illegal -# 1 : very unlikely -# 2 : normal -# 3 : very likely -MacRomanClassModel = ( -# UDF OTH ASC ASS ACV ACO ASV ASO ODD - 0, 0, 0, 0, 0, 0, 0, 0, 0, # UDF - 0, 3, 3, 3, 3, 3, 3, 3, 1, # OTH - 0, 3, 3, 3, 3, 3, 3, 3, 1, # ASC - 0, 3, 3, 3, 1, 1, 3, 3, 1, # ASS - 0, 3, 3, 3, 1, 2, 1, 2, 1, # ACV - 0, 3, 3, 3, 3, 3, 3, 3, 1, # ACO - 0, 3, 1, 3, 1, 1, 1, 3, 1, # ASV - 0, 3, 1, 3, 1, 1, 3, 3, 1, # ASO - 0, 1, 1, 1, 1, 1, 1, 1, 1, # ODD -) -# fmt: on - - -class MacRomanProber(CharSetProber): - def __init__(self) -> None: - super().__init__() - self._last_char_class = OTH - self._freq_counter: List[int] = [] - self.reset() - - def reset(self) -> None: - self._last_char_class = OTH - self._freq_counter = [0] * FREQ_CAT_NUM - - # express the prior that MacRoman is a somewhat rare encoding; - # this can be done by starting out in a slightly improbable state - # that must be overcome - self._freq_counter[2] = 10 - - super().reset() - - @property - def charset_name(self) -> str: - return "MacRoman" - - @property - def language(self) -> str: - return "" - - def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: - byte_str = self.remove_xml_tags(byte_str) - for c in byte_str: - char_class = MacRoman_CharToClass[c] - freq = MacRomanClassModel[(self._last_char_class * CLASS_NUM) + char_class] - if freq == 0: - self._state = ProbingState.NOT_ME - break - self._freq_counter[freq] += 1 - self._last_char_class = char_class - - return self.state - - def get_confidence(self) -> float: - if self.state == ProbingState.NOT_ME: - return 0.01 - - total = sum(self._freq_counter) - confidence = ( - 0.0 - if total < 0.01 - else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total - ) - confidence = max(confidence, 0.0) - # lower the confidence of MacRoman so that other more accurate - # detector can take 
priority. - confidence *= 0.73 - return confidence diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/uts46data.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/uts46data.py deleted file mode 100644 index 186796c17b25c1e766112ef4d9f16bb2dea4b306..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/uts46data.py +++ /dev/null @@ -1,8600 +0,0 @@ -# This file is automatically generated by tools/idna-data -# vim: set fileencoding=utf-8 : - -from typing import List, Tuple, Union - - -"""IDNA Mapping Table from UTS46.""" - - -__version__ = '15.0.0' -def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x0, '3'), - (0x1, '3'), - (0x2, '3'), - (0x3, '3'), - (0x4, '3'), - (0x5, '3'), - (0x6, '3'), - (0x7, '3'), - (0x8, '3'), - (0x9, '3'), - (0xA, '3'), - (0xB, '3'), - (0xC, '3'), - (0xD, '3'), - (0xE, '3'), - (0xF, '3'), - (0x10, '3'), - (0x11, '3'), - (0x12, '3'), - (0x13, '3'), - (0x14, '3'), - (0x15, '3'), - (0x16, '3'), - (0x17, '3'), - (0x18, '3'), - (0x19, '3'), - (0x1A, '3'), - (0x1B, '3'), - (0x1C, '3'), - (0x1D, '3'), - (0x1E, '3'), - (0x1F, '3'), - (0x20, '3'), - (0x21, '3'), - (0x22, '3'), - (0x23, '3'), - (0x24, '3'), - (0x25, '3'), - (0x26, '3'), - (0x27, '3'), - (0x28, '3'), - (0x29, '3'), - (0x2A, '3'), - (0x2B, '3'), - (0x2C, '3'), - (0x2D, 'V'), - (0x2E, 'V'), - (0x2F, '3'), - (0x30, 'V'), - (0x31, 'V'), - (0x32, 'V'), - (0x33, 'V'), - (0x34, 'V'), - (0x35, 'V'), - (0x36, 'V'), - (0x37, 'V'), - (0x38, 'V'), - (0x39, 'V'), - (0x3A, '3'), - (0x3B, '3'), - (0x3C, '3'), - (0x3D, '3'), - (0x3E, '3'), - (0x3F, '3'), - (0x40, '3'), - (0x41, 'M', 'a'), - (0x42, 'M', 'b'), - (0x43, 'M', 'c'), - (0x44, 'M', 'd'), - (0x45, 'M', 'e'), - (0x46, 'M', 'f'), - (0x47, 'M', 'g'), - (0x48, 'M', 'h'), - (0x49, 'M', 'i'), - (0x4A, 'M', 'j'), - (0x4B, 'M', 'k'), - (0x4C, 'M', 'l'), - (0x4D, 'M', 'm'), - (0x4E, 'M', 'n'), - (0x4F, 'M', 'o'), - (0x50, 'M', 'p'), - (0x51, 'M', 'q'), - (0x52, 'M', 'r'), - (0x53, 'M', 's'), - (0x54, 'M', 't'), - (0x55, 'M', 'u'), - (0x56, 'M', 'v'), - (0x57, 'M', 'w'), - (0x58, 'M', 'x'), - (0x59, 'M', 'y'), - (0x5A, 'M', 'z'), - (0x5B, '3'), - (0x5C, '3'), - (0x5D, '3'), - (0x5E, '3'), - (0x5F, '3'), - (0x60, '3'), - (0x61, 'V'), - (0x62, 'V'), - (0x63, 'V'), - ] - -def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x64, 'V'), - (0x65, 'V'), - (0x66, 'V'), - (0x67, 'V'), - (0x68, 'V'), - (0x69, 'V'), - (0x6A, 'V'), - (0x6B, 'V'), - (0x6C, 'V'), - (0x6D, 'V'), - (0x6E, 'V'), - (0x6F, 'V'), - (0x70, 'V'), - (0x71, 'V'), - (0x72, 'V'), - (0x73, 'V'), - (0x74, 'V'), - (0x75, 'V'), - (0x76, 'V'), - (0x77, 'V'), - (0x78, 'V'), - (0x79, 'V'), - (0x7A, 'V'), - (0x7B, '3'), - (0x7C, '3'), - (0x7D, '3'), - (0x7E, '3'), - (0x7F, '3'), - (0x80, 'X'), - (0x81, 'X'), - (0x82, 'X'), - (0x83, 'X'), - (0x84, 'X'), - (0x85, 'X'), - (0x86, 'X'), - (0x87, 'X'), - (0x88, 'X'), - (0x89, 'X'), - (0x8A, 'X'), - (0x8B, 'X'), - (0x8C, 'X'), - (0x8D, 'X'), - (0x8E, 'X'), - (0x8F, 'X'), - (0x90, 'X'), - (0x91, 'X'), - (0x92, 'X'), - (0x93, 'X'), - (0x94, 'X'), - (0x95, 'X'), - (0x96, 'X'), - (0x97, 'X'), - (0x98, 'X'), - (0x99, 'X'), - (0x9A, 'X'), - (0x9B, 'X'), - (0x9C, 'X'), - (0x9D, 'X'), - (0x9E, 'X'), - (0x9F, 'X'), - (0xA0, '3', ' '), - (0xA1, 'V'), - (0xA2, 'V'), - (0xA3, 'V'), - (0xA4, 'V'), - (0xA5, 'V'), - (0xA6, 'V'), - (0xA7, 'V'), - (0xA8, '3', ' ̈'), - (0xA9, 'V'), - (0xAA, 'M', 'a'), - (0xAB, 'V'), - (0xAC, 'V'), - (0xAD, 'I'), - (0xAE, 
'V'), - (0xAF, '3', ' ̄'), - (0xB0, 'V'), - (0xB1, 'V'), - (0xB2, 'M', '2'), - (0xB3, 'M', '3'), - (0xB4, '3', ' ́'), - (0xB5, 'M', 'μ'), - (0xB6, 'V'), - (0xB7, 'V'), - (0xB8, '3', ' ̧'), - (0xB9, 'M', '1'), - (0xBA, 'M', 'o'), - (0xBB, 'V'), - (0xBC, 'M', '1⁄4'), - (0xBD, 'M', '1⁄2'), - (0xBE, 'M', '3⁄4'), - (0xBF, 'V'), - (0xC0, 'M', 'à'), - (0xC1, 'M', 'á'), - (0xC2, 'M', 'â'), - (0xC3, 'M', 'ã'), - (0xC4, 'M', 'ä'), - (0xC5, 'M', 'å'), - (0xC6, 'M', 'æ'), - (0xC7, 'M', 'ç'), - ] - -def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC8, 'M', 'è'), - (0xC9, 'M', 'é'), - (0xCA, 'M', 'ê'), - (0xCB, 'M', 'ë'), - (0xCC, 'M', 'ì'), - (0xCD, 'M', 'í'), - (0xCE, 'M', 'î'), - (0xCF, 'M', 'ï'), - (0xD0, 'M', 'ð'), - (0xD1, 'M', 'ñ'), - (0xD2, 'M', 'ò'), - (0xD3, 'M', 'ó'), - (0xD4, 'M', 'ô'), - (0xD5, 'M', 'õ'), - (0xD6, 'M', 'ö'), - (0xD7, 'V'), - (0xD8, 'M', 'ø'), - (0xD9, 'M', 'ù'), - (0xDA, 'M', 'ú'), - (0xDB, 'M', 'û'), - (0xDC, 'M', 'ü'), - (0xDD, 'M', 'ý'), - (0xDE, 'M', 'þ'), - (0xDF, 'D', 'ss'), - (0xE0, 'V'), - (0xE1, 'V'), - (0xE2, 'V'), - (0xE3, 'V'), - (0xE4, 'V'), - (0xE5, 'V'), - (0xE6, 'V'), - (0xE7, 'V'), - (0xE8, 'V'), - (0xE9, 'V'), - (0xEA, 'V'), - (0xEB, 'V'), - (0xEC, 'V'), - (0xED, 'V'), - (0xEE, 'V'), - (0xEF, 'V'), - (0xF0, 'V'), - (0xF1, 'V'), - (0xF2, 'V'), - (0xF3, 'V'), - (0xF4, 'V'), - (0xF5, 'V'), - (0xF6, 'V'), - (0xF7, 'V'), - (0xF8, 'V'), - (0xF9, 'V'), - (0xFA, 'V'), - (0xFB, 'V'), - (0xFC, 'V'), - (0xFD, 'V'), - (0xFE, 'V'), - (0xFF, 'V'), - (0x100, 'M', 'ā'), - (0x101, 'V'), - (0x102, 'M', 'ă'), - (0x103, 'V'), - (0x104, 'M', 'ą'), - (0x105, 'V'), - (0x106, 'M', 'ć'), - (0x107, 'V'), - (0x108, 'M', 'ĉ'), - (0x109, 'V'), - (0x10A, 'M', 'ċ'), - (0x10B, 'V'), - (0x10C, 'M', 'č'), - (0x10D, 'V'), - (0x10E, 'M', 'ď'), - (0x10F, 'V'), - (0x110, 'M', 'đ'), - (0x111, 'V'), - (0x112, 'M', 'ē'), - (0x113, 'V'), - (0x114, 'M', 'ĕ'), - (0x115, 'V'), - (0x116, 'M', 'ė'), - (0x117, 'V'), - (0x118, 'M', 'ę'), - (0x119, 'V'), - (0x11A, 'M', 'ě'), - (0x11B, 'V'), - (0x11C, 'M', 'ĝ'), - (0x11D, 'V'), - (0x11E, 'M', 'ğ'), - (0x11F, 'V'), - (0x120, 'M', 'ġ'), - (0x121, 'V'), - (0x122, 'M', 'ģ'), - (0x123, 'V'), - (0x124, 'M', 'ĥ'), - (0x125, 'V'), - (0x126, 'M', 'ħ'), - (0x127, 'V'), - (0x128, 'M', 'ĩ'), - (0x129, 'V'), - (0x12A, 'M', 'ī'), - (0x12B, 'V'), - ] - -def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x12C, 'M', 'ĭ'), - (0x12D, 'V'), - (0x12E, 'M', 'į'), - (0x12F, 'V'), - (0x130, 'M', 'i̇'), - (0x131, 'V'), - (0x132, 'M', 'ij'), - (0x134, 'M', 'ĵ'), - (0x135, 'V'), - (0x136, 'M', 'ķ'), - (0x137, 'V'), - (0x139, 'M', 'ĺ'), - (0x13A, 'V'), - (0x13B, 'M', 'ļ'), - (0x13C, 'V'), - (0x13D, 'M', 'ľ'), - (0x13E, 'V'), - (0x13F, 'M', 'l·'), - (0x141, 'M', 'ł'), - (0x142, 'V'), - (0x143, 'M', 'ń'), - (0x144, 'V'), - (0x145, 'M', 'ņ'), - (0x146, 'V'), - (0x147, 'M', 'ň'), - (0x148, 'V'), - (0x149, 'M', 'ʼn'), - (0x14A, 'M', 'ŋ'), - (0x14B, 'V'), - (0x14C, 'M', 'ō'), - (0x14D, 'V'), - (0x14E, 'M', 'ŏ'), - (0x14F, 'V'), - (0x150, 'M', 'ő'), - (0x151, 'V'), - (0x152, 'M', 'œ'), - (0x153, 'V'), - (0x154, 'M', 'ŕ'), - (0x155, 'V'), - (0x156, 'M', 'ŗ'), - (0x157, 'V'), - (0x158, 'M', 'ř'), - (0x159, 'V'), - (0x15A, 'M', 'ś'), - (0x15B, 'V'), - (0x15C, 'M', 'ŝ'), - (0x15D, 'V'), - (0x15E, 'M', 'ş'), - (0x15F, 'V'), - (0x160, 'M', 'š'), - (0x161, 'V'), - (0x162, 'M', 'ţ'), - (0x163, 'V'), - (0x164, 'M', 'ť'), - (0x165, 'V'), - (0x166, 'M', 'ŧ'), - (0x167, 'V'), - (0x168, 'M', 'ũ'), - (0x169, 'V'), - (0x16A, 'M', 'ū'), - 
(0x16B, 'V'), - (0x16C, 'M', 'ŭ'), - (0x16D, 'V'), - (0x16E, 'M', 'ů'), - (0x16F, 'V'), - (0x170, 'M', 'ű'), - (0x171, 'V'), - (0x172, 'M', 'ų'), - (0x173, 'V'), - (0x174, 'M', 'ŵ'), - (0x175, 'V'), - (0x176, 'M', 'ŷ'), - (0x177, 'V'), - (0x178, 'M', 'ÿ'), - (0x179, 'M', 'ź'), - (0x17A, 'V'), - (0x17B, 'M', 'ż'), - (0x17C, 'V'), - (0x17D, 'M', 'ž'), - (0x17E, 'V'), - (0x17F, 'M', 's'), - (0x180, 'V'), - (0x181, 'M', 'ɓ'), - (0x182, 'M', 'ƃ'), - (0x183, 'V'), - (0x184, 'M', 'ƅ'), - (0x185, 'V'), - (0x186, 'M', 'ɔ'), - (0x187, 'M', 'ƈ'), - (0x188, 'V'), - (0x189, 'M', 'ɖ'), - (0x18A, 'M', 'ɗ'), - (0x18B, 'M', 'ƌ'), - (0x18C, 'V'), - (0x18E, 'M', 'ǝ'), - (0x18F, 'M', 'ə'), - (0x190, 'M', 'ɛ'), - (0x191, 'M', 'ƒ'), - (0x192, 'V'), - (0x193, 'M', 'ɠ'), - ] - -def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x194, 'M', 'ɣ'), - (0x195, 'V'), - (0x196, 'M', 'ɩ'), - (0x197, 'M', 'ɨ'), - (0x198, 'M', 'ƙ'), - (0x199, 'V'), - (0x19C, 'M', 'ɯ'), - (0x19D, 'M', 'ɲ'), - (0x19E, 'V'), - (0x19F, 'M', 'ɵ'), - (0x1A0, 'M', 'ơ'), - (0x1A1, 'V'), - (0x1A2, 'M', 'ƣ'), - (0x1A3, 'V'), - (0x1A4, 'M', 'ƥ'), - (0x1A5, 'V'), - (0x1A6, 'M', 'ʀ'), - (0x1A7, 'M', 'ƨ'), - (0x1A8, 'V'), - (0x1A9, 'M', 'ʃ'), - (0x1AA, 'V'), - (0x1AC, 'M', 'ƭ'), - (0x1AD, 'V'), - (0x1AE, 'M', 'ʈ'), - (0x1AF, 'M', 'ư'), - (0x1B0, 'V'), - (0x1B1, 'M', 'ʊ'), - (0x1B2, 'M', 'ʋ'), - (0x1B3, 'M', 'ƴ'), - (0x1B4, 'V'), - (0x1B5, 'M', 'ƶ'), - (0x1B6, 'V'), - (0x1B7, 'M', 'ʒ'), - (0x1B8, 'M', 'ƹ'), - (0x1B9, 'V'), - (0x1BC, 'M', 'ƽ'), - (0x1BD, 'V'), - (0x1C4, 'M', 'dž'), - (0x1C7, 'M', 'lj'), - (0x1CA, 'M', 'nj'), - (0x1CD, 'M', 'ǎ'), - (0x1CE, 'V'), - (0x1CF, 'M', 'ǐ'), - (0x1D0, 'V'), - (0x1D1, 'M', 'ǒ'), - (0x1D2, 'V'), - (0x1D3, 'M', 'ǔ'), - (0x1D4, 'V'), - (0x1D5, 'M', 'ǖ'), - (0x1D6, 'V'), - (0x1D7, 'M', 'ǘ'), - (0x1D8, 'V'), - (0x1D9, 'M', 'ǚ'), - (0x1DA, 'V'), - (0x1DB, 'M', 'ǜ'), - (0x1DC, 'V'), - (0x1DE, 'M', 'ǟ'), - (0x1DF, 'V'), - (0x1E0, 'M', 'ǡ'), - (0x1E1, 'V'), - (0x1E2, 'M', 'ǣ'), - (0x1E3, 'V'), - (0x1E4, 'M', 'ǥ'), - (0x1E5, 'V'), - (0x1E6, 'M', 'ǧ'), - (0x1E7, 'V'), - (0x1E8, 'M', 'ǩ'), - (0x1E9, 'V'), - (0x1EA, 'M', 'ǫ'), - (0x1EB, 'V'), - (0x1EC, 'M', 'ǭ'), - (0x1ED, 'V'), - (0x1EE, 'M', 'ǯ'), - (0x1EF, 'V'), - (0x1F1, 'M', 'dz'), - (0x1F4, 'M', 'ǵ'), - (0x1F5, 'V'), - (0x1F6, 'M', 'ƕ'), - (0x1F7, 'M', 'ƿ'), - (0x1F8, 'M', 'ǹ'), - (0x1F9, 'V'), - (0x1FA, 'M', 'ǻ'), - (0x1FB, 'V'), - (0x1FC, 'M', 'ǽ'), - (0x1FD, 'V'), - (0x1FE, 'M', 'ǿ'), - (0x1FF, 'V'), - (0x200, 'M', 'ȁ'), - (0x201, 'V'), - (0x202, 'M', 'ȃ'), - (0x203, 'V'), - (0x204, 'M', 'ȅ'), - (0x205, 'V'), - (0x206, 'M', 'ȇ'), - (0x207, 'V'), - (0x208, 'M', 'ȉ'), - (0x209, 'V'), - (0x20A, 'M', 'ȋ'), - (0x20B, 'V'), - (0x20C, 'M', 'ȍ'), - ] - -def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x20D, 'V'), - (0x20E, 'M', 'ȏ'), - (0x20F, 'V'), - (0x210, 'M', 'ȑ'), - (0x211, 'V'), - (0x212, 'M', 'ȓ'), - (0x213, 'V'), - (0x214, 'M', 'ȕ'), - (0x215, 'V'), - (0x216, 'M', 'ȗ'), - (0x217, 'V'), - (0x218, 'M', 'ș'), - (0x219, 'V'), - (0x21A, 'M', 'ț'), - (0x21B, 'V'), - (0x21C, 'M', 'ȝ'), - (0x21D, 'V'), - (0x21E, 'M', 'ȟ'), - (0x21F, 'V'), - (0x220, 'M', 'ƞ'), - (0x221, 'V'), - (0x222, 'M', 'ȣ'), - (0x223, 'V'), - (0x224, 'M', 'ȥ'), - (0x225, 'V'), - (0x226, 'M', 'ȧ'), - (0x227, 'V'), - (0x228, 'M', 'ȩ'), - (0x229, 'V'), - (0x22A, 'M', 'ȫ'), - (0x22B, 'V'), - (0x22C, 'M', 'ȭ'), - (0x22D, 'V'), - (0x22E, 'M', 'ȯ'), - (0x22F, 'V'), - (0x230, 'M', 'ȱ'), - (0x231, 'V'), - (0x232, 'M', 'ȳ'), - (0x233, 'V'), - (0x23A, 
'M', 'ⱥ'), - (0x23B, 'M', 'ȼ'), - (0x23C, 'V'), - (0x23D, 'M', 'ƚ'), - (0x23E, 'M', 'ⱦ'), - (0x23F, 'V'), - (0x241, 'M', 'ɂ'), - (0x242, 'V'), - (0x243, 'M', 'ƀ'), - (0x244, 'M', 'ʉ'), - (0x245, 'M', 'ʌ'), - (0x246, 'M', 'ɇ'), - (0x247, 'V'), - (0x248, 'M', 'ɉ'), - (0x249, 'V'), - (0x24A, 'M', 'ɋ'), - (0x24B, 'V'), - (0x24C, 'M', 'ɍ'), - (0x24D, 'V'), - (0x24E, 'M', 'ɏ'), - (0x24F, 'V'), - (0x2B0, 'M', 'h'), - (0x2B1, 'M', 'ɦ'), - (0x2B2, 'M', 'j'), - (0x2B3, 'M', 'r'), - (0x2B4, 'M', 'ɹ'), - (0x2B5, 'M', 'ɻ'), - (0x2B6, 'M', 'ʁ'), - (0x2B7, 'M', 'w'), - (0x2B8, 'M', 'y'), - (0x2B9, 'V'), - (0x2D8, '3', ' ̆'), - (0x2D9, '3', ' ̇'), - (0x2DA, '3', ' ̊'), - (0x2DB, '3', ' ̨'), - (0x2DC, '3', ' ̃'), - (0x2DD, '3', ' ̋'), - (0x2DE, 'V'), - (0x2E0, 'M', 'ɣ'), - (0x2E1, 'M', 'l'), - (0x2E2, 'M', 's'), - (0x2E3, 'M', 'x'), - (0x2E4, 'M', 'ʕ'), - (0x2E5, 'V'), - (0x340, 'M', '̀'), - (0x341, 'M', '́'), - (0x342, 'V'), - (0x343, 'M', '̓'), - (0x344, 'M', '̈́'), - (0x345, 'M', 'ι'), - (0x346, 'V'), - (0x34F, 'I'), - (0x350, 'V'), - (0x370, 'M', 'ͱ'), - (0x371, 'V'), - (0x372, 'M', 'ͳ'), - (0x373, 'V'), - (0x374, 'M', 'ʹ'), - (0x375, 'V'), - (0x376, 'M', 'ͷ'), - (0x377, 'V'), - ] - -def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x378, 'X'), - (0x37A, '3', ' ι'), - (0x37B, 'V'), - (0x37E, '3', ';'), - (0x37F, 'M', 'ϳ'), - (0x380, 'X'), - (0x384, '3', ' ́'), - (0x385, '3', ' ̈́'), - (0x386, 'M', 'ά'), - (0x387, 'M', '·'), - (0x388, 'M', 'έ'), - (0x389, 'M', 'ή'), - (0x38A, 'M', 'ί'), - (0x38B, 'X'), - (0x38C, 'M', 'ό'), - (0x38D, 'X'), - (0x38E, 'M', 'ύ'), - (0x38F, 'M', 'ώ'), - (0x390, 'V'), - (0x391, 'M', 'α'), - (0x392, 'M', 'β'), - (0x393, 'M', 'γ'), - (0x394, 'M', 'δ'), - (0x395, 'M', 'ε'), - (0x396, 'M', 'ζ'), - (0x397, 'M', 'η'), - (0x398, 'M', 'θ'), - (0x399, 'M', 'ι'), - (0x39A, 'M', 'κ'), - (0x39B, 'M', 'λ'), - (0x39C, 'M', 'μ'), - (0x39D, 'M', 'ν'), - (0x39E, 'M', 'ξ'), - (0x39F, 'M', 'ο'), - (0x3A0, 'M', 'π'), - (0x3A1, 'M', 'ρ'), - (0x3A2, 'X'), - (0x3A3, 'M', 'σ'), - (0x3A4, 'M', 'τ'), - (0x3A5, 'M', 'υ'), - (0x3A6, 'M', 'φ'), - (0x3A7, 'M', 'χ'), - (0x3A8, 'M', 'ψ'), - (0x3A9, 'M', 'ω'), - (0x3AA, 'M', 'ϊ'), - (0x3AB, 'M', 'ϋ'), - (0x3AC, 'V'), - (0x3C2, 'D', 'σ'), - (0x3C3, 'V'), - (0x3CF, 'M', 'ϗ'), - (0x3D0, 'M', 'β'), - (0x3D1, 'M', 'θ'), - (0x3D2, 'M', 'υ'), - (0x3D3, 'M', 'ύ'), - (0x3D4, 'M', 'ϋ'), - (0x3D5, 'M', 'φ'), - (0x3D6, 'M', 'π'), - (0x3D7, 'V'), - (0x3D8, 'M', 'ϙ'), - (0x3D9, 'V'), - (0x3DA, 'M', 'ϛ'), - (0x3DB, 'V'), - (0x3DC, 'M', 'ϝ'), - (0x3DD, 'V'), - (0x3DE, 'M', 'ϟ'), - (0x3DF, 'V'), - (0x3E0, 'M', 'ϡ'), - (0x3E1, 'V'), - (0x3E2, 'M', 'ϣ'), - (0x3E3, 'V'), - (0x3E4, 'M', 'ϥ'), - (0x3E5, 'V'), - (0x3E6, 'M', 'ϧ'), - (0x3E7, 'V'), - (0x3E8, 'M', 'ϩ'), - (0x3E9, 'V'), - (0x3EA, 'M', 'ϫ'), - (0x3EB, 'V'), - (0x3EC, 'M', 'ϭ'), - (0x3ED, 'V'), - (0x3EE, 'M', 'ϯ'), - (0x3EF, 'V'), - (0x3F0, 'M', 'κ'), - (0x3F1, 'M', 'ρ'), - (0x3F2, 'M', 'σ'), - (0x3F3, 'V'), - (0x3F4, 'M', 'θ'), - (0x3F5, 'M', 'ε'), - (0x3F6, 'V'), - (0x3F7, 'M', 'ϸ'), - (0x3F8, 'V'), - (0x3F9, 'M', 'σ'), - (0x3FA, 'M', 'ϻ'), - (0x3FB, 'V'), - (0x3FD, 'M', 'ͻ'), - (0x3FE, 'M', 'ͼ'), - (0x3FF, 'M', 'ͽ'), - (0x400, 'M', 'ѐ'), - (0x401, 'M', 'ё'), - (0x402, 'M', 'ђ'), - ] - -def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x403, 'M', 'ѓ'), - (0x404, 'M', 'є'), - (0x405, 'M', 'ѕ'), - (0x406, 'M', 'і'), - (0x407, 'M', 'ї'), - (0x408, 'M', 'ј'), - (0x409, 'M', 'љ'), - (0x40A, 'M', 'њ'), - (0x40B, 'M', 'ћ'), - (0x40C, 'M', 'ќ'), - (0x40D, 'M', 
'ѝ'), - (0x40E, 'M', 'ў'), - (0x40F, 'M', 'џ'), - (0x410, 'M', 'а'), - (0x411, 'M', 'б'), - (0x412, 'M', 'в'), - (0x413, 'M', 'г'), - (0x414, 'M', 'д'), - (0x415, 'M', 'е'), - (0x416, 'M', 'ж'), - (0x417, 'M', 'з'), - (0x418, 'M', 'и'), - (0x419, 'M', 'й'), - (0x41A, 'M', 'к'), - (0x41B, 'M', 'л'), - (0x41C, 'M', 'м'), - (0x41D, 'M', 'н'), - (0x41E, 'M', 'о'), - (0x41F, 'M', 'п'), - (0x420, 'M', 'р'), - (0x421, 'M', 'с'), - (0x422, 'M', 'т'), - (0x423, 'M', 'у'), - (0x424, 'M', 'ф'), - (0x425, 'M', 'х'), - (0x426, 'M', 'ц'), - (0x427, 'M', 'ч'), - (0x428, 'M', 'ш'), - (0x429, 'M', 'щ'), - (0x42A, 'M', 'ъ'), - (0x42B, 'M', 'ы'), - (0x42C, 'M', 'ь'), - (0x42D, 'M', 'э'), - (0x42E, 'M', 'ю'), - (0x42F, 'M', 'я'), - (0x430, 'V'), - (0x460, 'M', 'ѡ'), - (0x461, 'V'), - (0x462, 'M', 'ѣ'), - (0x463, 'V'), - (0x464, 'M', 'ѥ'), - (0x465, 'V'), - (0x466, 'M', 'ѧ'), - (0x467, 'V'), - (0x468, 'M', 'ѩ'), - (0x469, 'V'), - (0x46A, 'M', 'ѫ'), - (0x46B, 'V'), - (0x46C, 'M', 'ѭ'), - (0x46D, 'V'), - (0x46E, 'M', 'ѯ'), - (0x46F, 'V'), - (0x470, 'M', 'ѱ'), - (0x471, 'V'), - (0x472, 'M', 'ѳ'), - (0x473, 'V'), - (0x474, 'M', 'ѵ'), - (0x475, 'V'), - (0x476, 'M', 'ѷ'), - (0x477, 'V'), - (0x478, 'M', 'ѹ'), - (0x479, 'V'), - (0x47A, 'M', 'ѻ'), - (0x47B, 'V'), - (0x47C, 'M', 'ѽ'), - (0x47D, 'V'), - (0x47E, 'M', 'ѿ'), - (0x47F, 'V'), - (0x480, 'M', 'ҁ'), - (0x481, 'V'), - (0x48A, 'M', 'ҋ'), - (0x48B, 'V'), - (0x48C, 'M', 'ҍ'), - (0x48D, 'V'), - (0x48E, 'M', 'ҏ'), - (0x48F, 'V'), - (0x490, 'M', 'ґ'), - (0x491, 'V'), - (0x492, 'M', 'ғ'), - (0x493, 'V'), - (0x494, 'M', 'ҕ'), - (0x495, 'V'), - (0x496, 'M', 'җ'), - (0x497, 'V'), - (0x498, 'M', 'ҙ'), - (0x499, 'V'), - (0x49A, 'M', 'қ'), - (0x49B, 'V'), - (0x49C, 'M', 'ҝ'), - (0x49D, 'V'), - ] - -def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x49E, 'M', 'ҟ'), - (0x49F, 'V'), - (0x4A0, 'M', 'ҡ'), - (0x4A1, 'V'), - (0x4A2, 'M', 'ң'), - (0x4A3, 'V'), - (0x4A4, 'M', 'ҥ'), - (0x4A5, 'V'), - (0x4A6, 'M', 'ҧ'), - (0x4A7, 'V'), - (0x4A8, 'M', 'ҩ'), - (0x4A9, 'V'), - (0x4AA, 'M', 'ҫ'), - (0x4AB, 'V'), - (0x4AC, 'M', 'ҭ'), - (0x4AD, 'V'), - (0x4AE, 'M', 'ү'), - (0x4AF, 'V'), - (0x4B0, 'M', 'ұ'), - (0x4B1, 'V'), - (0x4B2, 'M', 'ҳ'), - (0x4B3, 'V'), - (0x4B4, 'M', 'ҵ'), - (0x4B5, 'V'), - (0x4B6, 'M', 'ҷ'), - (0x4B7, 'V'), - (0x4B8, 'M', 'ҹ'), - (0x4B9, 'V'), - (0x4BA, 'M', 'һ'), - (0x4BB, 'V'), - (0x4BC, 'M', 'ҽ'), - (0x4BD, 'V'), - (0x4BE, 'M', 'ҿ'), - (0x4BF, 'V'), - (0x4C0, 'X'), - (0x4C1, 'M', 'ӂ'), - (0x4C2, 'V'), - (0x4C3, 'M', 'ӄ'), - (0x4C4, 'V'), - (0x4C5, 'M', 'ӆ'), - (0x4C6, 'V'), - (0x4C7, 'M', 'ӈ'), - (0x4C8, 'V'), - (0x4C9, 'M', 'ӊ'), - (0x4CA, 'V'), - (0x4CB, 'M', 'ӌ'), - (0x4CC, 'V'), - (0x4CD, 'M', 'ӎ'), - (0x4CE, 'V'), - (0x4D0, 'M', 'ӑ'), - (0x4D1, 'V'), - (0x4D2, 'M', 'ӓ'), - (0x4D3, 'V'), - (0x4D4, 'M', 'ӕ'), - (0x4D5, 'V'), - (0x4D6, 'M', 'ӗ'), - (0x4D7, 'V'), - (0x4D8, 'M', 'ә'), - (0x4D9, 'V'), - (0x4DA, 'M', 'ӛ'), - (0x4DB, 'V'), - (0x4DC, 'M', 'ӝ'), - (0x4DD, 'V'), - (0x4DE, 'M', 'ӟ'), - (0x4DF, 'V'), - (0x4E0, 'M', 'ӡ'), - (0x4E1, 'V'), - (0x4E2, 'M', 'ӣ'), - (0x4E3, 'V'), - (0x4E4, 'M', 'ӥ'), - (0x4E5, 'V'), - (0x4E6, 'M', 'ӧ'), - (0x4E7, 'V'), - (0x4E8, 'M', 'ө'), - (0x4E9, 'V'), - (0x4EA, 'M', 'ӫ'), - (0x4EB, 'V'), - (0x4EC, 'M', 'ӭ'), - (0x4ED, 'V'), - (0x4EE, 'M', 'ӯ'), - (0x4EF, 'V'), - (0x4F0, 'M', 'ӱ'), - (0x4F1, 'V'), - (0x4F2, 'M', 'ӳ'), - (0x4F3, 'V'), - (0x4F4, 'M', 'ӵ'), - (0x4F5, 'V'), - (0x4F6, 'M', 'ӷ'), - (0x4F7, 'V'), - (0x4F8, 'M', 'ӹ'), - (0x4F9, 'V'), - (0x4FA, 'M', 'ӻ'), - (0x4FB, 'V'), - (0x4FC, 'M', 
'ӽ'), - (0x4FD, 'V'), - (0x4FE, 'M', 'ӿ'), - (0x4FF, 'V'), - (0x500, 'M', 'ԁ'), - (0x501, 'V'), - (0x502, 'M', 'ԃ'), - ] - -def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x503, 'V'), - (0x504, 'M', 'ԅ'), - (0x505, 'V'), - (0x506, 'M', 'ԇ'), - (0x507, 'V'), - (0x508, 'M', 'ԉ'), - (0x509, 'V'), - (0x50A, 'M', 'ԋ'), - (0x50B, 'V'), - (0x50C, 'M', 'ԍ'), - (0x50D, 'V'), - (0x50E, 'M', 'ԏ'), - (0x50F, 'V'), - (0x510, 'M', 'ԑ'), - (0x511, 'V'), - (0x512, 'M', 'ԓ'), - (0x513, 'V'), - (0x514, 'M', 'ԕ'), - (0x515, 'V'), - (0x516, 'M', 'ԗ'), - (0x517, 'V'), - (0x518, 'M', 'ԙ'), - (0x519, 'V'), - (0x51A, 'M', 'ԛ'), - (0x51B, 'V'), - (0x51C, 'M', 'ԝ'), - (0x51D, 'V'), - (0x51E, 'M', 'ԟ'), - (0x51F, 'V'), - (0x520, 'M', 'ԡ'), - (0x521, 'V'), - (0x522, 'M', 'ԣ'), - (0x523, 'V'), - (0x524, 'M', 'ԥ'), - (0x525, 'V'), - (0x526, 'M', 'ԧ'), - (0x527, 'V'), - (0x528, 'M', 'ԩ'), - (0x529, 'V'), - (0x52A, 'M', 'ԫ'), - (0x52B, 'V'), - (0x52C, 'M', 'ԭ'), - (0x52D, 'V'), - (0x52E, 'M', 'ԯ'), - (0x52F, 'V'), - (0x530, 'X'), - (0x531, 'M', 'ա'), - (0x532, 'M', 'բ'), - (0x533, 'M', 'գ'), - (0x534, 'M', 'դ'), - (0x535, 'M', 'ե'), - (0x536, 'M', 'զ'), - (0x537, 'M', 'է'), - (0x538, 'M', 'ը'), - (0x539, 'M', 'թ'), - (0x53A, 'M', 'ժ'), - (0x53B, 'M', 'ի'), - (0x53C, 'M', 'լ'), - (0x53D, 'M', 'խ'), - (0x53E, 'M', 'ծ'), - (0x53F, 'M', 'կ'), - (0x540, 'M', 'հ'), - (0x541, 'M', 'ձ'), - (0x542, 'M', 'ղ'), - (0x543, 'M', 'ճ'), - (0x544, 'M', 'մ'), - (0x545, 'M', 'յ'), - (0x546, 'M', 'ն'), - (0x547, 'M', 'շ'), - (0x548, 'M', 'ո'), - (0x549, 'M', 'չ'), - (0x54A, 'M', 'պ'), - (0x54B, 'M', 'ջ'), - (0x54C, 'M', 'ռ'), - (0x54D, 'M', 'ս'), - (0x54E, 'M', 'վ'), - (0x54F, 'M', 'տ'), - (0x550, 'M', 'ր'), - (0x551, 'M', 'ց'), - (0x552, 'M', 'ւ'), - (0x553, 'M', 'փ'), - (0x554, 'M', 'ք'), - (0x555, 'M', 'օ'), - (0x556, 'M', 'ֆ'), - (0x557, 'X'), - (0x559, 'V'), - (0x587, 'M', 'եւ'), - (0x588, 'V'), - (0x58B, 'X'), - (0x58D, 'V'), - (0x590, 'X'), - (0x591, 'V'), - (0x5C8, 'X'), - (0x5D0, 'V'), - (0x5EB, 'X'), - (0x5EF, 'V'), - (0x5F5, 'X'), - (0x606, 'V'), - (0x61C, 'X'), - (0x61D, 'V'), - ] - -def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x675, 'M', 'اٴ'), - (0x676, 'M', 'وٴ'), - (0x677, 'M', 'ۇٴ'), - (0x678, 'M', 'يٴ'), - (0x679, 'V'), - (0x6DD, 'X'), - (0x6DE, 'V'), - (0x70E, 'X'), - (0x710, 'V'), - (0x74B, 'X'), - (0x74D, 'V'), - (0x7B2, 'X'), - (0x7C0, 'V'), - (0x7FB, 'X'), - (0x7FD, 'V'), - (0x82E, 'X'), - (0x830, 'V'), - (0x83F, 'X'), - (0x840, 'V'), - (0x85C, 'X'), - (0x85E, 'V'), - (0x85F, 'X'), - (0x860, 'V'), - (0x86B, 'X'), - (0x870, 'V'), - (0x88F, 'X'), - (0x898, 'V'), - (0x8E2, 'X'), - (0x8E3, 'V'), - (0x958, 'M', 'क़'), - (0x959, 'M', 'ख़'), - (0x95A, 'M', 'ग़'), - (0x95B, 'M', 'ज़'), - (0x95C, 'M', 'ड़'), - (0x95D, 'M', 'ढ़'), - (0x95E, 'M', 'फ़'), - (0x95F, 'M', 'य़'), - (0x960, 'V'), - (0x984, 'X'), - (0x985, 'V'), - (0x98D, 'X'), - (0x98F, 'V'), - (0x991, 'X'), - (0x993, 'V'), - (0x9A9, 'X'), - (0x9AA, 'V'), - (0x9B1, 'X'), - (0x9B2, 'V'), - (0x9B3, 'X'), - (0x9B6, 'V'), - (0x9BA, 'X'), - (0x9BC, 'V'), - (0x9C5, 'X'), - (0x9C7, 'V'), - (0x9C9, 'X'), - (0x9CB, 'V'), - (0x9CF, 'X'), - (0x9D7, 'V'), - (0x9D8, 'X'), - (0x9DC, 'M', 'ড়'), - (0x9DD, 'M', 'ঢ়'), - (0x9DE, 'X'), - (0x9DF, 'M', 'য়'), - (0x9E0, 'V'), - (0x9E4, 'X'), - (0x9E6, 'V'), - (0x9FF, 'X'), - (0xA01, 'V'), - (0xA04, 'X'), - (0xA05, 'V'), - (0xA0B, 'X'), - (0xA0F, 'V'), - (0xA11, 'X'), - (0xA13, 'V'), - (0xA29, 'X'), - (0xA2A, 'V'), - (0xA31, 'X'), - (0xA32, 'V'), - (0xA33, 'M', 
'ਲ਼'), - (0xA34, 'X'), - (0xA35, 'V'), - (0xA36, 'M', 'ਸ਼'), - (0xA37, 'X'), - (0xA38, 'V'), - (0xA3A, 'X'), - (0xA3C, 'V'), - (0xA3D, 'X'), - (0xA3E, 'V'), - (0xA43, 'X'), - (0xA47, 'V'), - (0xA49, 'X'), - (0xA4B, 'V'), - (0xA4E, 'X'), - (0xA51, 'V'), - (0xA52, 'X'), - (0xA59, 'M', 'ਖ਼'), - (0xA5A, 'M', 'ਗ਼'), - (0xA5B, 'M', 'ਜ਼'), - (0xA5C, 'V'), - (0xA5D, 'X'), - ] - -def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA5E, 'M', 'ਫ਼'), - (0xA5F, 'X'), - (0xA66, 'V'), - (0xA77, 'X'), - (0xA81, 'V'), - (0xA84, 'X'), - (0xA85, 'V'), - (0xA8E, 'X'), - (0xA8F, 'V'), - (0xA92, 'X'), - (0xA93, 'V'), - (0xAA9, 'X'), - (0xAAA, 'V'), - (0xAB1, 'X'), - (0xAB2, 'V'), - (0xAB4, 'X'), - (0xAB5, 'V'), - (0xABA, 'X'), - (0xABC, 'V'), - (0xAC6, 'X'), - (0xAC7, 'V'), - (0xACA, 'X'), - (0xACB, 'V'), - (0xACE, 'X'), - (0xAD0, 'V'), - (0xAD1, 'X'), - (0xAE0, 'V'), - (0xAE4, 'X'), - (0xAE6, 'V'), - (0xAF2, 'X'), - (0xAF9, 'V'), - (0xB00, 'X'), - (0xB01, 'V'), - (0xB04, 'X'), - (0xB05, 'V'), - (0xB0D, 'X'), - (0xB0F, 'V'), - (0xB11, 'X'), - (0xB13, 'V'), - (0xB29, 'X'), - (0xB2A, 'V'), - (0xB31, 'X'), - (0xB32, 'V'), - (0xB34, 'X'), - (0xB35, 'V'), - (0xB3A, 'X'), - (0xB3C, 'V'), - (0xB45, 'X'), - (0xB47, 'V'), - (0xB49, 'X'), - (0xB4B, 'V'), - (0xB4E, 'X'), - (0xB55, 'V'), - (0xB58, 'X'), - (0xB5C, 'M', 'ଡ଼'), - (0xB5D, 'M', 'ଢ଼'), - (0xB5E, 'X'), - (0xB5F, 'V'), - (0xB64, 'X'), - (0xB66, 'V'), - (0xB78, 'X'), - (0xB82, 'V'), - (0xB84, 'X'), - (0xB85, 'V'), - (0xB8B, 'X'), - (0xB8E, 'V'), - (0xB91, 'X'), - (0xB92, 'V'), - (0xB96, 'X'), - (0xB99, 'V'), - (0xB9B, 'X'), - (0xB9C, 'V'), - (0xB9D, 'X'), - (0xB9E, 'V'), - (0xBA0, 'X'), - (0xBA3, 'V'), - (0xBA5, 'X'), - (0xBA8, 'V'), - (0xBAB, 'X'), - (0xBAE, 'V'), - (0xBBA, 'X'), - (0xBBE, 'V'), - (0xBC3, 'X'), - (0xBC6, 'V'), - (0xBC9, 'X'), - (0xBCA, 'V'), - (0xBCE, 'X'), - (0xBD0, 'V'), - (0xBD1, 'X'), - (0xBD7, 'V'), - (0xBD8, 'X'), - (0xBE6, 'V'), - (0xBFB, 'X'), - (0xC00, 'V'), - (0xC0D, 'X'), - (0xC0E, 'V'), - (0xC11, 'X'), - (0xC12, 'V'), - (0xC29, 'X'), - (0xC2A, 'V'), - ] - -def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC3A, 'X'), - (0xC3C, 'V'), - (0xC45, 'X'), - (0xC46, 'V'), - (0xC49, 'X'), - (0xC4A, 'V'), - (0xC4E, 'X'), - (0xC55, 'V'), - (0xC57, 'X'), - (0xC58, 'V'), - (0xC5B, 'X'), - (0xC5D, 'V'), - (0xC5E, 'X'), - (0xC60, 'V'), - (0xC64, 'X'), - (0xC66, 'V'), - (0xC70, 'X'), - (0xC77, 'V'), - (0xC8D, 'X'), - (0xC8E, 'V'), - (0xC91, 'X'), - (0xC92, 'V'), - (0xCA9, 'X'), - (0xCAA, 'V'), - (0xCB4, 'X'), - (0xCB5, 'V'), - (0xCBA, 'X'), - (0xCBC, 'V'), - (0xCC5, 'X'), - (0xCC6, 'V'), - (0xCC9, 'X'), - (0xCCA, 'V'), - (0xCCE, 'X'), - (0xCD5, 'V'), - (0xCD7, 'X'), - (0xCDD, 'V'), - (0xCDF, 'X'), - (0xCE0, 'V'), - (0xCE4, 'X'), - (0xCE6, 'V'), - (0xCF0, 'X'), - (0xCF1, 'V'), - (0xCF4, 'X'), - (0xD00, 'V'), - (0xD0D, 'X'), - (0xD0E, 'V'), - (0xD11, 'X'), - (0xD12, 'V'), - (0xD45, 'X'), - (0xD46, 'V'), - (0xD49, 'X'), - (0xD4A, 'V'), - (0xD50, 'X'), - (0xD54, 'V'), - (0xD64, 'X'), - (0xD66, 'V'), - (0xD80, 'X'), - (0xD81, 'V'), - (0xD84, 'X'), - (0xD85, 'V'), - (0xD97, 'X'), - (0xD9A, 'V'), - (0xDB2, 'X'), - (0xDB3, 'V'), - (0xDBC, 'X'), - (0xDBD, 'V'), - (0xDBE, 'X'), - (0xDC0, 'V'), - (0xDC7, 'X'), - (0xDCA, 'V'), - (0xDCB, 'X'), - (0xDCF, 'V'), - (0xDD5, 'X'), - (0xDD6, 'V'), - (0xDD7, 'X'), - (0xDD8, 'V'), - (0xDE0, 'X'), - (0xDE6, 'V'), - (0xDF0, 'X'), - (0xDF2, 'V'), - (0xDF5, 'X'), - (0xE01, 'V'), - (0xE33, 'M', 'ํา'), - (0xE34, 'V'), - (0xE3B, 'X'), - (0xE3F, 'V'), - (0xE5C, 
'X'), - (0xE81, 'V'), - (0xE83, 'X'), - (0xE84, 'V'), - (0xE85, 'X'), - (0xE86, 'V'), - (0xE8B, 'X'), - (0xE8C, 'V'), - (0xEA4, 'X'), - (0xEA5, 'V'), - (0xEA6, 'X'), - (0xEA7, 'V'), - (0xEB3, 'M', 'ໍາ'), - (0xEB4, 'V'), - ] - -def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xEBE, 'X'), - (0xEC0, 'V'), - (0xEC5, 'X'), - (0xEC6, 'V'), - (0xEC7, 'X'), - (0xEC8, 'V'), - (0xECF, 'X'), - (0xED0, 'V'), - (0xEDA, 'X'), - (0xEDC, 'M', 'ຫນ'), - (0xEDD, 'M', 'ຫມ'), - (0xEDE, 'V'), - (0xEE0, 'X'), - (0xF00, 'V'), - (0xF0C, 'M', '་'), - (0xF0D, 'V'), - (0xF43, 'M', 'གྷ'), - (0xF44, 'V'), - (0xF48, 'X'), - (0xF49, 'V'), - (0xF4D, 'M', 'ཌྷ'), - (0xF4E, 'V'), - (0xF52, 'M', 'དྷ'), - (0xF53, 'V'), - (0xF57, 'M', 'བྷ'), - (0xF58, 'V'), - (0xF5C, 'M', 'ཛྷ'), - (0xF5D, 'V'), - (0xF69, 'M', 'ཀྵ'), - (0xF6A, 'V'), - (0xF6D, 'X'), - (0xF71, 'V'), - (0xF73, 'M', 'ཱི'), - (0xF74, 'V'), - (0xF75, 'M', 'ཱུ'), - (0xF76, 'M', 'ྲྀ'), - (0xF77, 'M', 'ྲཱྀ'), - (0xF78, 'M', 'ླྀ'), - (0xF79, 'M', 'ླཱྀ'), - (0xF7A, 'V'), - (0xF81, 'M', 'ཱྀ'), - (0xF82, 'V'), - (0xF93, 'M', 'ྒྷ'), - (0xF94, 'V'), - (0xF98, 'X'), - (0xF99, 'V'), - (0xF9D, 'M', 'ྜྷ'), - (0xF9E, 'V'), - (0xFA2, 'M', 'ྡྷ'), - (0xFA3, 'V'), - (0xFA7, 'M', 'ྦྷ'), - (0xFA8, 'V'), - (0xFAC, 'M', 'ྫྷ'), - (0xFAD, 'V'), - (0xFB9, 'M', 'ྐྵ'), - (0xFBA, 'V'), - (0xFBD, 'X'), - (0xFBE, 'V'), - (0xFCD, 'X'), - (0xFCE, 'V'), - (0xFDB, 'X'), - (0x1000, 'V'), - (0x10A0, 'X'), - (0x10C7, 'M', 'ⴧ'), - (0x10C8, 'X'), - (0x10CD, 'M', 'ⴭ'), - (0x10CE, 'X'), - (0x10D0, 'V'), - (0x10FC, 'M', 'ნ'), - (0x10FD, 'V'), - (0x115F, 'X'), - (0x1161, 'V'), - (0x1249, 'X'), - (0x124A, 'V'), - (0x124E, 'X'), - (0x1250, 'V'), - (0x1257, 'X'), - (0x1258, 'V'), - (0x1259, 'X'), - (0x125A, 'V'), - (0x125E, 'X'), - (0x1260, 'V'), - (0x1289, 'X'), - (0x128A, 'V'), - (0x128E, 'X'), - (0x1290, 'V'), - (0x12B1, 'X'), - (0x12B2, 'V'), - (0x12B6, 'X'), - (0x12B8, 'V'), - (0x12BF, 'X'), - (0x12C0, 'V'), - (0x12C1, 'X'), - (0x12C2, 'V'), - (0x12C6, 'X'), - (0x12C8, 'V'), - (0x12D7, 'X'), - (0x12D8, 'V'), - (0x1311, 'X'), - (0x1312, 'V'), - ] - -def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1316, 'X'), - (0x1318, 'V'), - (0x135B, 'X'), - (0x135D, 'V'), - (0x137D, 'X'), - (0x1380, 'V'), - (0x139A, 'X'), - (0x13A0, 'V'), - (0x13F6, 'X'), - (0x13F8, 'M', 'Ᏸ'), - (0x13F9, 'M', 'Ᏹ'), - (0x13FA, 'M', 'Ᏺ'), - (0x13FB, 'M', 'Ᏻ'), - (0x13FC, 'M', 'Ᏼ'), - (0x13FD, 'M', 'Ᏽ'), - (0x13FE, 'X'), - (0x1400, 'V'), - (0x1680, 'X'), - (0x1681, 'V'), - (0x169D, 'X'), - (0x16A0, 'V'), - (0x16F9, 'X'), - (0x1700, 'V'), - (0x1716, 'X'), - (0x171F, 'V'), - (0x1737, 'X'), - (0x1740, 'V'), - (0x1754, 'X'), - (0x1760, 'V'), - (0x176D, 'X'), - (0x176E, 'V'), - (0x1771, 'X'), - (0x1772, 'V'), - (0x1774, 'X'), - (0x1780, 'V'), - (0x17B4, 'X'), - (0x17B6, 'V'), - (0x17DE, 'X'), - (0x17E0, 'V'), - (0x17EA, 'X'), - (0x17F0, 'V'), - (0x17FA, 'X'), - (0x1800, 'V'), - (0x1806, 'X'), - (0x1807, 'V'), - (0x180B, 'I'), - (0x180E, 'X'), - (0x180F, 'I'), - (0x1810, 'V'), - (0x181A, 'X'), - (0x1820, 'V'), - (0x1879, 'X'), - (0x1880, 'V'), - (0x18AB, 'X'), - (0x18B0, 'V'), - (0x18F6, 'X'), - (0x1900, 'V'), - (0x191F, 'X'), - (0x1920, 'V'), - (0x192C, 'X'), - (0x1930, 'V'), - (0x193C, 'X'), - (0x1940, 'V'), - (0x1941, 'X'), - (0x1944, 'V'), - (0x196E, 'X'), - (0x1970, 'V'), - (0x1975, 'X'), - (0x1980, 'V'), - (0x19AC, 'X'), - (0x19B0, 'V'), - (0x19CA, 'X'), - (0x19D0, 'V'), - (0x19DB, 'X'), - (0x19DE, 'V'), - (0x1A1C, 'X'), - (0x1A1E, 'V'), - (0x1A5F, 'X'), - (0x1A60, 'V'), 
- (0x1A7D, 'X'), - (0x1A7F, 'V'), - (0x1A8A, 'X'), - (0x1A90, 'V'), - (0x1A9A, 'X'), - (0x1AA0, 'V'), - (0x1AAE, 'X'), - (0x1AB0, 'V'), - (0x1ACF, 'X'), - (0x1B00, 'V'), - (0x1B4D, 'X'), - (0x1B50, 'V'), - (0x1B7F, 'X'), - (0x1B80, 'V'), - (0x1BF4, 'X'), - (0x1BFC, 'V'), - (0x1C38, 'X'), - (0x1C3B, 'V'), - (0x1C4A, 'X'), - (0x1C4D, 'V'), - (0x1C80, 'M', 'в'), - ] - -def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1C81, 'M', 'д'), - (0x1C82, 'M', 'о'), - (0x1C83, 'M', 'с'), - (0x1C84, 'M', 'т'), - (0x1C86, 'M', 'ъ'), - (0x1C87, 'M', 'ѣ'), - (0x1C88, 'M', 'ꙋ'), - (0x1C89, 'X'), - (0x1C90, 'M', 'ა'), - (0x1C91, 'M', 'ბ'), - (0x1C92, 'M', 'გ'), - (0x1C93, 'M', 'დ'), - (0x1C94, 'M', 'ე'), - (0x1C95, 'M', 'ვ'), - (0x1C96, 'M', 'ზ'), - (0x1C97, 'M', 'თ'), - (0x1C98, 'M', 'ი'), - (0x1C99, 'M', 'კ'), - (0x1C9A, 'M', 'ლ'), - (0x1C9B, 'M', 'მ'), - (0x1C9C, 'M', 'ნ'), - (0x1C9D, 'M', 'ო'), - (0x1C9E, 'M', 'პ'), - (0x1C9F, 'M', 'ჟ'), - (0x1CA0, 'M', 'რ'), - (0x1CA1, 'M', 'ს'), - (0x1CA2, 'M', 'ტ'), - (0x1CA3, 'M', 'უ'), - (0x1CA4, 'M', 'ფ'), - (0x1CA5, 'M', 'ქ'), - (0x1CA6, 'M', 'ღ'), - (0x1CA7, 'M', 'ყ'), - (0x1CA8, 'M', 'შ'), - (0x1CA9, 'M', 'ჩ'), - (0x1CAA, 'M', 'ც'), - (0x1CAB, 'M', 'ძ'), - (0x1CAC, 'M', 'წ'), - (0x1CAD, 'M', 'ჭ'), - (0x1CAE, 'M', 'ხ'), - (0x1CAF, 'M', 'ჯ'), - (0x1CB0, 'M', 'ჰ'), - (0x1CB1, 'M', 'ჱ'), - (0x1CB2, 'M', 'ჲ'), - (0x1CB3, 'M', 'ჳ'), - (0x1CB4, 'M', 'ჴ'), - (0x1CB5, 'M', 'ჵ'), - (0x1CB6, 'M', 'ჶ'), - (0x1CB7, 'M', 'ჷ'), - (0x1CB8, 'M', 'ჸ'), - (0x1CB9, 'M', 'ჹ'), - (0x1CBA, 'M', 'ჺ'), - (0x1CBB, 'X'), - (0x1CBD, 'M', 'ჽ'), - (0x1CBE, 'M', 'ჾ'), - (0x1CBF, 'M', 'ჿ'), - (0x1CC0, 'V'), - (0x1CC8, 'X'), - (0x1CD0, 'V'), - (0x1CFB, 'X'), - (0x1D00, 'V'), - (0x1D2C, 'M', 'a'), - (0x1D2D, 'M', 'æ'), - (0x1D2E, 'M', 'b'), - (0x1D2F, 'V'), - (0x1D30, 'M', 'd'), - (0x1D31, 'M', 'e'), - (0x1D32, 'M', 'ǝ'), - (0x1D33, 'M', 'g'), - (0x1D34, 'M', 'h'), - (0x1D35, 'M', 'i'), - (0x1D36, 'M', 'j'), - (0x1D37, 'M', 'k'), - (0x1D38, 'M', 'l'), - (0x1D39, 'M', 'm'), - (0x1D3A, 'M', 'n'), - (0x1D3B, 'V'), - (0x1D3C, 'M', 'o'), - (0x1D3D, 'M', 'ȣ'), - (0x1D3E, 'M', 'p'), - (0x1D3F, 'M', 'r'), - (0x1D40, 'M', 't'), - (0x1D41, 'M', 'u'), - (0x1D42, 'M', 'w'), - (0x1D43, 'M', 'a'), - (0x1D44, 'M', 'ɐ'), - (0x1D45, 'M', 'ɑ'), - (0x1D46, 'M', 'ᴂ'), - (0x1D47, 'M', 'b'), - (0x1D48, 'M', 'd'), - (0x1D49, 'M', 'e'), - (0x1D4A, 'M', 'ə'), - (0x1D4B, 'M', 'ɛ'), - (0x1D4C, 'M', 'ɜ'), - (0x1D4D, 'M', 'g'), - (0x1D4E, 'V'), - (0x1D4F, 'M', 'k'), - (0x1D50, 'M', 'm'), - (0x1D51, 'M', 'ŋ'), - (0x1D52, 'M', 'o'), - (0x1D53, 'M', 'ɔ'), - ] - -def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D54, 'M', 'ᴖ'), - (0x1D55, 'M', 'ᴗ'), - (0x1D56, 'M', 'p'), - (0x1D57, 'M', 't'), - (0x1D58, 'M', 'u'), - (0x1D59, 'M', 'ᴝ'), - (0x1D5A, 'M', 'ɯ'), - (0x1D5B, 'M', 'v'), - (0x1D5C, 'M', 'ᴥ'), - (0x1D5D, 'M', 'β'), - (0x1D5E, 'M', 'γ'), - (0x1D5F, 'M', 'δ'), - (0x1D60, 'M', 'φ'), - (0x1D61, 'M', 'χ'), - (0x1D62, 'M', 'i'), - (0x1D63, 'M', 'r'), - (0x1D64, 'M', 'u'), - (0x1D65, 'M', 'v'), - (0x1D66, 'M', 'β'), - (0x1D67, 'M', 'γ'), - (0x1D68, 'M', 'ρ'), - (0x1D69, 'M', 'φ'), - (0x1D6A, 'M', 'χ'), - (0x1D6B, 'V'), - (0x1D78, 'M', 'н'), - (0x1D79, 'V'), - (0x1D9B, 'M', 'ɒ'), - (0x1D9C, 'M', 'c'), - (0x1D9D, 'M', 'ɕ'), - (0x1D9E, 'M', 'ð'), - (0x1D9F, 'M', 'ɜ'), - (0x1DA0, 'M', 'f'), - (0x1DA1, 'M', 'ɟ'), - (0x1DA2, 'M', 'ɡ'), - (0x1DA3, 'M', 'ɥ'), - (0x1DA4, 'M', 'ɨ'), - (0x1DA5, 'M', 'ɩ'), - (0x1DA6, 'M', 'ɪ'), - (0x1DA7, 'M', 'ᵻ'), - (0x1DA8, 'M', 
'ʝ'), - (0x1DA9, 'M', 'ɭ'), - (0x1DAA, 'M', 'ᶅ'), - (0x1DAB, 'M', 'ʟ'), - (0x1DAC, 'M', 'ɱ'), - (0x1DAD, 'M', 'ɰ'), - (0x1DAE, 'M', 'ɲ'), - (0x1DAF, 'M', 'ɳ'), - (0x1DB0, 'M', 'ɴ'), - (0x1DB1, 'M', 'ɵ'), - (0x1DB2, 'M', 'ɸ'), - (0x1DB3, 'M', 'ʂ'), - (0x1DB4, 'M', 'ʃ'), - (0x1DB5, 'M', 'ƫ'), - (0x1DB6, 'M', 'ʉ'), - (0x1DB7, 'M', 'ʊ'), - (0x1DB8, 'M', 'ᴜ'), - (0x1DB9, 'M', 'ʋ'), - (0x1DBA, 'M', 'ʌ'), - (0x1DBB, 'M', 'z'), - (0x1DBC, 'M', 'ʐ'), - (0x1DBD, 'M', 'ʑ'), - (0x1DBE, 'M', 'ʒ'), - (0x1DBF, 'M', 'θ'), - (0x1DC0, 'V'), - (0x1E00, 'M', 'ḁ'), - (0x1E01, 'V'), - (0x1E02, 'M', 'ḃ'), - (0x1E03, 'V'), - (0x1E04, 'M', 'ḅ'), - (0x1E05, 'V'), - (0x1E06, 'M', 'ḇ'), - (0x1E07, 'V'), - (0x1E08, 'M', 'ḉ'), - (0x1E09, 'V'), - (0x1E0A, 'M', 'ḋ'), - (0x1E0B, 'V'), - (0x1E0C, 'M', 'ḍ'), - (0x1E0D, 'V'), - (0x1E0E, 'M', 'ḏ'), - (0x1E0F, 'V'), - (0x1E10, 'M', 'ḑ'), - (0x1E11, 'V'), - (0x1E12, 'M', 'ḓ'), - (0x1E13, 'V'), - (0x1E14, 'M', 'ḕ'), - (0x1E15, 'V'), - (0x1E16, 'M', 'ḗ'), - (0x1E17, 'V'), - (0x1E18, 'M', 'ḙ'), - (0x1E19, 'V'), - (0x1E1A, 'M', 'ḛ'), - (0x1E1B, 'V'), - (0x1E1C, 'M', 'ḝ'), - (0x1E1D, 'V'), - (0x1E1E, 'M', 'ḟ'), - (0x1E1F, 'V'), - (0x1E20, 'M', 'ḡ'), - (0x1E21, 'V'), - (0x1E22, 'M', 'ḣ'), - (0x1E23, 'V'), - ] - -def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E24, 'M', 'ḥ'), - (0x1E25, 'V'), - (0x1E26, 'M', 'ḧ'), - (0x1E27, 'V'), - (0x1E28, 'M', 'ḩ'), - (0x1E29, 'V'), - (0x1E2A, 'M', 'ḫ'), - (0x1E2B, 'V'), - (0x1E2C, 'M', 'ḭ'), - (0x1E2D, 'V'), - (0x1E2E, 'M', 'ḯ'), - (0x1E2F, 'V'), - (0x1E30, 'M', 'ḱ'), - (0x1E31, 'V'), - (0x1E32, 'M', 'ḳ'), - (0x1E33, 'V'), - (0x1E34, 'M', 'ḵ'), - (0x1E35, 'V'), - (0x1E36, 'M', 'ḷ'), - (0x1E37, 'V'), - (0x1E38, 'M', 'ḹ'), - (0x1E39, 'V'), - (0x1E3A, 'M', 'ḻ'), - (0x1E3B, 'V'), - (0x1E3C, 'M', 'ḽ'), - (0x1E3D, 'V'), - (0x1E3E, 'M', 'ḿ'), - (0x1E3F, 'V'), - (0x1E40, 'M', 'ṁ'), - (0x1E41, 'V'), - (0x1E42, 'M', 'ṃ'), - (0x1E43, 'V'), - (0x1E44, 'M', 'ṅ'), - (0x1E45, 'V'), - (0x1E46, 'M', 'ṇ'), - (0x1E47, 'V'), - (0x1E48, 'M', 'ṉ'), - (0x1E49, 'V'), - (0x1E4A, 'M', 'ṋ'), - (0x1E4B, 'V'), - (0x1E4C, 'M', 'ṍ'), - (0x1E4D, 'V'), - (0x1E4E, 'M', 'ṏ'), - (0x1E4F, 'V'), - (0x1E50, 'M', 'ṑ'), - (0x1E51, 'V'), - (0x1E52, 'M', 'ṓ'), - (0x1E53, 'V'), - (0x1E54, 'M', 'ṕ'), - (0x1E55, 'V'), - (0x1E56, 'M', 'ṗ'), - (0x1E57, 'V'), - (0x1E58, 'M', 'ṙ'), - (0x1E59, 'V'), - (0x1E5A, 'M', 'ṛ'), - (0x1E5B, 'V'), - (0x1E5C, 'M', 'ṝ'), - (0x1E5D, 'V'), - (0x1E5E, 'M', 'ṟ'), - (0x1E5F, 'V'), - (0x1E60, 'M', 'ṡ'), - (0x1E61, 'V'), - (0x1E62, 'M', 'ṣ'), - (0x1E63, 'V'), - (0x1E64, 'M', 'ṥ'), - (0x1E65, 'V'), - (0x1E66, 'M', 'ṧ'), - (0x1E67, 'V'), - (0x1E68, 'M', 'ṩ'), - (0x1E69, 'V'), - (0x1E6A, 'M', 'ṫ'), - (0x1E6B, 'V'), - (0x1E6C, 'M', 'ṭ'), - (0x1E6D, 'V'), - (0x1E6E, 'M', 'ṯ'), - (0x1E6F, 'V'), - (0x1E70, 'M', 'ṱ'), - (0x1E71, 'V'), - (0x1E72, 'M', 'ṳ'), - (0x1E73, 'V'), - (0x1E74, 'M', 'ṵ'), - (0x1E75, 'V'), - (0x1E76, 'M', 'ṷ'), - (0x1E77, 'V'), - (0x1E78, 'M', 'ṹ'), - (0x1E79, 'V'), - (0x1E7A, 'M', 'ṻ'), - (0x1E7B, 'V'), - (0x1E7C, 'M', 'ṽ'), - (0x1E7D, 'V'), - (0x1E7E, 'M', 'ṿ'), - (0x1E7F, 'V'), - (0x1E80, 'M', 'ẁ'), - (0x1E81, 'V'), - (0x1E82, 'M', 'ẃ'), - (0x1E83, 'V'), - (0x1E84, 'M', 'ẅ'), - (0x1E85, 'V'), - (0x1E86, 'M', 'ẇ'), - (0x1E87, 'V'), - ] - -def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E88, 'M', 'ẉ'), - (0x1E89, 'V'), - (0x1E8A, 'M', 'ẋ'), - (0x1E8B, 'V'), - (0x1E8C, 'M', 'ẍ'), - (0x1E8D, 'V'), - (0x1E8E, 'M', 'ẏ'), - (0x1E8F, 'V'), - (0x1E90, 'M', 'ẑ'), - (0x1E91, 'V'), - 
(0x1E92, 'M', 'ẓ'), - (0x1E93, 'V'), - (0x1E94, 'M', 'ẕ'), - (0x1E95, 'V'), - (0x1E9A, 'M', 'aʾ'), - (0x1E9B, 'M', 'ṡ'), - (0x1E9C, 'V'), - (0x1E9E, 'M', 'ss'), - (0x1E9F, 'V'), - (0x1EA0, 'M', 'ạ'), - (0x1EA1, 'V'), - (0x1EA2, 'M', 'ả'), - (0x1EA3, 'V'), - (0x1EA4, 'M', 'ấ'), - (0x1EA5, 'V'), - (0x1EA6, 'M', 'ầ'), - (0x1EA7, 'V'), - (0x1EA8, 'M', 'ẩ'), - (0x1EA9, 'V'), - (0x1EAA, 'M', 'ẫ'), - (0x1EAB, 'V'), - (0x1EAC, 'M', 'ậ'), - (0x1EAD, 'V'), - (0x1EAE, 'M', 'ắ'), - (0x1EAF, 'V'), - (0x1EB0, 'M', 'ằ'), - (0x1EB1, 'V'), - (0x1EB2, 'M', 'ẳ'), - (0x1EB3, 'V'), - (0x1EB4, 'M', 'ẵ'), - (0x1EB5, 'V'), - (0x1EB6, 'M', 'ặ'), - (0x1EB7, 'V'), - (0x1EB8, 'M', 'ẹ'), - (0x1EB9, 'V'), - (0x1EBA, 'M', 'ẻ'), - (0x1EBB, 'V'), - (0x1EBC, 'M', 'ẽ'), - (0x1EBD, 'V'), - (0x1EBE, 'M', 'ế'), - (0x1EBF, 'V'), - (0x1EC0, 'M', 'ề'), - (0x1EC1, 'V'), - (0x1EC2, 'M', 'ể'), - (0x1EC3, 'V'), - (0x1EC4, 'M', 'ễ'), - (0x1EC5, 'V'), - (0x1EC6, 'M', 'ệ'), - (0x1EC7, 'V'), - (0x1EC8, 'M', 'ỉ'), - (0x1EC9, 'V'), - (0x1ECA, 'M', 'ị'), - (0x1ECB, 'V'), - (0x1ECC, 'M', 'ọ'), - (0x1ECD, 'V'), - (0x1ECE, 'M', 'ỏ'), - (0x1ECF, 'V'), - (0x1ED0, 'M', 'ố'), - (0x1ED1, 'V'), - (0x1ED2, 'M', 'ồ'), - (0x1ED3, 'V'), - (0x1ED4, 'M', 'ổ'), - (0x1ED5, 'V'), - (0x1ED6, 'M', 'ỗ'), - (0x1ED7, 'V'), - (0x1ED8, 'M', 'ộ'), - (0x1ED9, 'V'), - (0x1EDA, 'M', 'ớ'), - (0x1EDB, 'V'), - (0x1EDC, 'M', 'ờ'), - (0x1EDD, 'V'), - (0x1EDE, 'M', 'ở'), - (0x1EDF, 'V'), - (0x1EE0, 'M', 'ỡ'), - (0x1EE1, 'V'), - (0x1EE2, 'M', 'ợ'), - (0x1EE3, 'V'), - (0x1EE4, 'M', 'ụ'), - (0x1EE5, 'V'), - (0x1EE6, 'M', 'ủ'), - (0x1EE7, 'V'), - (0x1EE8, 'M', 'ứ'), - (0x1EE9, 'V'), - (0x1EEA, 'M', 'ừ'), - (0x1EEB, 'V'), - (0x1EEC, 'M', 'ử'), - (0x1EED, 'V'), - (0x1EEE, 'M', 'ữ'), - (0x1EEF, 'V'), - (0x1EF0, 'M', 'ự'), - ] - -def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EF1, 'V'), - (0x1EF2, 'M', 'ỳ'), - (0x1EF3, 'V'), - (0x1EF4, 'M', 'ỵ'), - (0x1EF5, 'V'), - (0x1EF6, 'M', 'ỷ'), - (0x1EF7, 'V'), - (0x1EF8, 'M', 'ỹ'), - (0x1EF9, 'V'), - (0x1EFA, 'M', 'ỻ'), - (0x1EFB, 'V'), - (0x1EFC, 'M', 'ỽ'), - (0x1EFD, 'V'), - (0x1EFE, 'M', 'ỿ'), - (0x1EFF, 'V'), - (0x1F08, 'M', 'ἀ'), - (0x1F09, 'M', 'ἁ'), - (0x1F0A, 'M', 'ἂ'), - (0x1F0B, 'M', 'ἃ'), - (0x1F0C, 'M', 'ἄ'), - (0x1F0D, 'M', 'ἅ'), - (0x1F0E, 'M', 'ἆ'), - (0x1F0F, 'M', 'ἇ'), - (0x1F10, 'V'), - (0x1F16, 'X'), - (0x1F18, 'M', 'ἐ'), - (0x1F19, 'M', 'ἑ'), - (0x1F1A, 'M', 'ἒ'), - (0x1F1B, 'M', 'ἓ'), - (0x1F1C, 'M', 'ἔ'), - (0x1F1D, 'M', 'ἕ'), - (0x1F1E, 'X'), - (0x1F20, 'V'), - (0x1F28, 'M', 'ἠ'), - (0x1F29, 'M', 'ἡ'), - (0x1F2A, 'M', 'ἢ'), - (0x1F2B, 'M', 'ἣ'), - (0x1F2C, 'M', 'ἤ'), - (0x1F2D, 'M', 'ἥ'), - (0x1F2E, 'M', 'ἦ'), - (0x1F2F, 'M', 'ἧ'), - (0x1F30, 'V'), - (0x1F38, 'M', 'ἰ'), - (0x1F39, 'M', 'ἱ'), - (0x1F3A, 'M', 'ἲ'), - (0x1F3B, 'M', 'ἳ'), - (0x1F3C, 'M', 'ἴ'), - (0x1F3D, 'M', 'ἵ'), - (0x1F3E, 'M', 'ἶ'), - (0x1F3F, 'M', 'ἷ'), - (0x1F40, 'V'), - (0x1F46, 'X'), - (0x1F48, 'M', 'ὀ'), - (0x1F49, 'M', 'ὁ'), - (0x1F4A, 'M', 'ὂ'), - (0x1F4B, 'M', 'ὃ'), - (0x1F4C, 'M', 'ὄ'), - (0x1F4D, 'M', 'ὅ'), - (0x1F4E, 'X'), - (0x1F50, 'V'), - (0x1F58, 'X'), - (0x1F59, 'M', 'ὑ'), - (0x1F5A, 'X'), - (0x1F5B, 'M', 'ὓ'), - (0x1F5C, 'X'), - (0x1F5D, 'M', 'ὕ'), - (0x1F5E, 'X'), - (0x1F5F, 'M', 'ὗ'), - (0x1F60, 'V'), - (0x1F68, 'M', 'ὠ'), - (0x1F69, 'M', 'ὡ'), - (0x1F6A, 'M', 'ὢ'), - (0x1F6B, 'M', 'ὣ'), - (0x1F6C, 'M', 'ὤ'), - (0x1F6D, 'M', 'ὥ'), - (0x1F6E, 'M', 'ὦ'), - (0x1F6F, 'M', 'ὧ'), - (0x1F70, 'V'), - (0x1F71, 'M', 'ά'), - (0x1F72, 'V'), - (0x1F73, 'M', 'έ'), - (0x1F74, 'V'), - (0x1F75, 'M', 'ή'), - 
(0x1F76, 'V'), - (0x1F77, 'M', 'ί'), - (0x1F78, 'V'), - (0x1F79, 'M', 'ό'), - (0x1F7A, 'V'), - (0x1F7B, 'M', 'ύ'), - (0x1F7C, 'V'), - (0x1F7D, 'M', 'ώ'), - (0x1F7E, 'X'), - (0x1F80, 'M', 'ἀι'), - (0x1F81, 'M', 'ἁι'), - (0x1F82, 'M', 'ἂι'), - (0x1F83, 'M', 'ἃι'), - (0x1F84, 'M', 'ἄι'), - (0x1F85, 'M', 'ἅι'), - (0x1F86, 'M', 'ἆι'), - (0x1F87, 'M', 'ἇι'), - ] - -def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F88, 'M', 'ἀι'), - (0x1F89, 'M', 'ἁι'), - (0x1F8A, 'M', 'ἂι'), - (0x1F8B, 'M', 'ἃι'), - (0x1F8C, 'M', 'ἄι'), - (0x1F8D, 'M', 'ἅι'), - (0x1F8E, 'M', 'ἆι'), - (0x1F8F, 'M', 'ἇι'), - (0x1F90, 'M', 'ἠι'), - (0x1F91, 'M', 'ἡι'), - (0x1F92, 'M', 'ἢι'), - (0x1F93, 'M', 'ἣι'), - (0x1F94, 'M', 'ἤι'), - (0x1F95, 'M', 'ἥι'), - (0x1F96, 'M', 'ἦι'), - (0x1F97, 'M', 'ἧι'), - (0x1F98, 'M', 'ἠι'), - (0x1F99, 'M', 'ἡι'), - (0x1F9A, 'M', 'ἢι'), - (0x1F9B, 'M', 'ἣι'), - (0x1F9C, 'M', 'ἤι'), - (0x1F9D, 'M', 'ἥι'), - (0x1F9E, 'M', 'ἦι'), - (0x1F9F, 'M', 'ἧι'), - (0x1FA0, 'M', 'ὠι'), - (0x1FA1, 'M', 'ὡι'), - (0x1FA2, 'M', 'ὢι'), - (0x1FA3, 'M', 'ὣι'), - (0x1FA4, 'M', 'ὤι'), - (0x1FA5, 'M', 'ὥι'), - (0x1FA6, 'M', 'ὦι'), - (0x1FA7, 'M', 'ὧι'), - (0x1FA8, 'M', 'ὠι'), - (0x1FA9, 'M', 'ὡι'), - (0x1FAA, 'M', 'ὢι'), - (0x1FAB, 'M', 'ὣι'), - (0x1FAC, 'M', 'ὤι'), - (0x1FAD, 'M', 'ὥι'), - (0x1FAE, 'M', 'ὦι'), - (0x1FAF, 'M', 'ὧι'), - (0x1FB0, 'V'), - (0x1FB2, 'M', 'ὰι'), - (0x1FB3, 'M', 'αι'), - (0x1FB4, 'M', 'άι'), - (0x1FB5, 'X'), - (0x1FB6, 'V'), - (0x1FB7, 'M', 'ᾶι'), - (0x1FB8, 'M', 'ᾰ'), - (0x1FB9, 'M', 'ᾱ'), - (0x1FBA, 'M', 'ὰ'), - (0x1FBB, 'M', 'ά'), - (0x1FBC, 'M', 'αι'), - (0x1FBD, '3', ' ̓'), - (0x1FBE, 'M', 'ι'), - (0x1FBF, '3', ' ̓'), - (0x1FC0, '3', ' ͂'), - (0x1FC1, '3', ' ̈͂'), - (0x1FC2, 'M', 'ὴι'), - (0x1FC3, 'M', 'ηι'), - (0x1FC4, 'M', 'ήι'), - (0x1FC5, 'X'), - (0x1FC6, 'V'), - (0x1FC7, 'M', 'ῆι'), - (0x1FC8, 'M', 'ὲ'), - (0x1FC9, 'M', 'έ'), - (0x1FCA, 'M', 'ὴ'), - (0x1FCB, 'M', 'ή'), - (0x1FCC, 'M', 'ηι'), - (0x1FCD, '3', ' ̓̀'), - (0x1FCE, '3', ' ̓́'), - (0x1FCF, '3', ' ̓͂'), - (0x1FD0, 'V'), - (0x1FD3, 'M', 'ΐ'), - (0x1FD4, 'X'), - (0x1FD6, 'V'), - (0x1FD8, 'M', 'ῐ'), - (0x1FD9, 'M', 'ῑ'), - (0x1FDA, 'M', 'ὶ'), - (0x1FDB, 'M', 'ί'), - (0x1FDC, 'X'), - (0x1FDD, '3', ' ̔̀'), - (0x1FDE, '3', ' ̔́'), - (0x1FDF, '3', ' ̔͂'), - (0x1FE0, 'V'), - (0x1FE3, 'M', 'ΰ'), - (0x1FE4, 'V'), - (0x1FE8, 'M', 'ῠ'), - (0x1FE9, 'M', 'ῡ'), - (0x1FEA, 'M', 'ὺ'), - (0x1FEB, 'M', 'ύ'), - (0x1FEC, 'M', 'ῥ'), - (0x1FED, '3', ' ̈̀'), - (0x1FEE, '3', ' ̈́'), - (0x1FEF, '3', '`'), - (0x1FF0, 'X'), - (0x1FF2, 'M', 'ὼι'), - (0x1FF3, 'M', 'ωι'), - (0x1FF4, 'M', 'ώι'), - (0x1FF5, 'X'), - (0x1FF6, 'V'), - ] - -def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FF7, 'M', 'ῶι'), - (0x1FF8, 'M', 'ὸ'), - (0x1FF9, 'M', 'ό'), - (0x1FFA, 'M', 'ὼ'), - (0x1FFB, 'M', 'ώ'), - (0x1FFC, 'M', 'ωι'), - (0x1FFD, '3', ' ́'), - (0x1FFE, '3', ' ̔'), - (0x1FFF, 'X'), - (0x2000, '3', ' '), - (0x200B, 'I'), - (0x200C, 'D', ''), - (0x200E, 'X'), - (0x2010, 'V'), - (0x2011, 'M', '‐'), - (0x2012, 'V'), - (0x2017, '3', ' ̳'), - (0x2018, 'V'), - (0x2024, 'X'), - (0x2027, 'V'), - (0x2028, 'X'), - (0x202F, '3', ' '), - (0x2030, 'V'), - (0x2033, 'M', '′′'), - (0x2034, 'M', '′′′'), - (0x2035, 'V'), - (0x2036, 'M', '‵‵'), - (0x2037, 'M', '‵‵‵'), - (0x2038, 'V'), - (0x203C, '3', '!!'), - (0x203D, 'V'), - (0x203E, '3', ' ̅'), - (0x203F, 'V'), - (0x2047, '3', '??'), - (0x2048, '3', '?!'), - (0x2049, '3', '!?'), - (0x204A, 'V'), - (0x2057, 'M', '′′′′'), - (0x2058, 'V'), - (0x205F, '3', ' '), - 
(0x2060, 'I'), - (0x2061, 'X'), - (0x2064, 'I'), - (0x2065, 'X'), - (0x2070, 'M', '0'), - (0x2071, 'M', 'i'), - (0x2072, 'X'), - (0x2074, 'M', '4'), - (0x2075, 'M', '5'), - (0x2076, 'M', '6'), - (0x2077, 'M', '7'), - (0x2078, 'M', '8'), - (0x2079, 'M', '9'), - (0x207A, '3', '+'), - (0x207B, 'M', '−'), - (0x207C, '3', '='), - (0x207D, '3', '('), - (0x207E, '3', ')'), - (0x207F, 'M', 'n'), - (0x2080, 'M', '0'), - (0x2081, 'M', '1'), - (0x2082, 'M', '2'), - (0x2083, 'M', '3'), - (0x2084, 'M', '4'), - (0x2085, 'M', '5'), - (0x2086, 'M', '6'), - (0x2087, 'M', '7'), - (0x2088, 'M', '8'), - (0x2089, 'M', '9'), - (0x208A, '3', '+'), - (0x208B, 'M', '−'), - (0x208C, '3', '='), - (0x208D, '3', '('), - (0x208E, '3', ')'), - (0x208F, 'X'), - (0x2090, 'M', 'a'), - (0x2091, 'M', 'e'), - (0x2092, 'M', 'o'), - (0x2093, 'M', 'x'), - (0x2094, 'M', 'ə'), - (0x2095, 'M', 'h'), - (0x2096, 'M', 'k'), - (0x2097, 'M', 'l'), - (0x2098, 'M', 'm'), - (0x2099, 'M', 'n'), - (0x209A, 'M', 'p'), - (0x209B, 'M', 's'), - (0x209C, 'M', 't'), - (0x209D, 'X'), - (0x20A0, 'V'), - (0x20A8, 'M', 'rs'), - (0x20A9, 'V'), - (0x20C1, 'X'), - (0x20D0, 'V'), - (0x20F1, 'X'), - (0x2100, '3', 'a/c'), - (0x2101, '3', 'a/s'), - (0x2102, 'M', 'c'), - (0x2103, 'M', '°c'), - (0x2104, 'V'), - ] - -def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2105, '3', 'c/o'), - (0x2106, '3', 'c/u'), - (0x2107, 'M', 'ɛ'), - (0x2108, 'V'), - (0x2109, 'M', '°f'), - (0x210A, 'M', 'g'), - (0x210B, 'M', 'h'), - (0x210F, 'M', 'ħ'), - (0x2110, 'M', 'i'), - (0x2112, 'M', 'l'), - (0x2114, 'V'), - (0x2115, 'M', 'n'), - (0x2116, 'M', 'no'), - (0x2117, 'V'), - (0x2119, 'M', 'p'), - (0x211A, 'M', 'q'), - (0x211B, 'M', 'r'), - (0x211E, 'V'), - (0x2120, 'M', 'sm'), - (0x2121, 'M', 'tel'), - (0x2122, 'M', 'tm'), - (0x2123, 'V'), - (0x2124, 'M', 'z'), - (0x2125, 'V'), - (0x2126, 'M', 'ω'), - (0x2127, 'V'), - (0x2128, 'M', 'z'), - (0x2129, 'V'), - (0x212A, 'M', 'k'), - (0x212B, 'M', 'å'), - (0x212C, 'M', 'b'), - (0x212D, 'M', 'c'), - (0x212E, 'V'), - (0x212F, 'M', 'e'), - (0x2131, 'M', 'f'), - (0x2132, 'X'), - (0x2133, 'M', 'm'), - (0x2134, 'M', 'o'), - (0x2135, 'M', 'א'), - (0x2136, 'M', 'ב'), - (0x2137, 'M', 'ג'), - (0x2138, 'M', 'ד'), - (0x2139, 'M', 'i'), - (0x213A, 'V'), - (0x213B, 'M', 'fax'), - (0x213C, 'M', 'π'), - (0x213D, 'M', 'γ'), - (0x213F, 'M', 'π'), - (0x2140, 'M', '∑'), - (0x2141, 'V'), - (0x2145, 'M', 'd'), - (0x2147, 'M', 'e'), - (0x2148, 'M', 'i'), - (0x2149, 'M', 'j'), - (0x214A, 'V'), - (0x2150, 'M', '1⁄7'), - (0x2151, 'M', '1⁄9'), - (0x2152, 'M', '1⁄10'), - (0x2153, 'M', '1⁄3'), - (0x2154, 'M', '2⁄3'), - (0x2155, 'M', '1⁄5'), - (0x2156, 'M', '2⁄5'), - (0x2157, 'M', '3⁄5'), - (0x2158, 'M', '4⁄5'), - (0x2159, 'M', '1⁄6'), - (0x215A, 'M', '5⁄6'), - (0x215B, 'M', '1⁄8'), - (0x215C, 'M', '3⁄8'), - (0x215D, 'M', '5⁄8'), - (0x215E, 'M', '7⁄8'), - (0x215F, 'M', '1⁄'), - (0x2160, 'M', 'i'), - (0x2161, 'M', 'ii'), - (0x2162, 'M', 'iii'), - (0x2163, 'M', 'iv'), - (0x2164, 'M', 'v'), - (0x2165, 'M', 'vi'), - (0x2166, 'M', 'vii'), - (0x2167, 'M', 'viii'), - (0x2168, 'M', 'ix'), - (0x2169, 'M', 'x'), - (0x216A, 'M', 'xi'), - (0x216B, 'M', 'xii'), - (0x216C, 'M', 'l'), - (0x216D, 'M', 'c'), - (0x216E, 'M', 'd'), - (0x216F, 'M', 'm'), - (0x2170, 'M', 'i'), - (0x2171, 'M', 'ii'), - (0x2172, 'M', 'iii'), - (0x2173, 'M', 'iv'), - (0x2174, 'M', 'v'), - (0x2175, 'M', 'vi'), - (0x2176, 'M', 'vii'), - (0x2177, 'M', 'viii'), - (0x2178, 'M', 'ix'), - (0x2179, 'M', 'x'), - (0x217A, 'M', 'xi'), - (0x217B, 'M', 'xii'), - (0x217C, 'M', 'l'), - 
] - -def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x217D, 'M', 'c'), - (0x217E, 'M', 'd'), - (0x217F, 'M', 'm'), - (0x2180, 'V'), - (0x2183, 'X'), - (0x2184, 'V'), - (0x2189, 'M', '0⁄3'), - (0x218A, 'V'), - (0x218C, 'X'), - (0x2190, 'V'), - (0x222C, 'M', '∫∫'), - (0x222D, 'M', '∫∫∫'), - (0x222E, 'V'), - (0x222F, 'M', '∮∮'), - (0x2230, 'M', '∮∮∮'), - (0x2231, 'V'), - (0x2260, '3'), - (0x2261, 'V'), - (0x226E, '3'), - (0x2270, 'V'), - (0x2329, 'M', '〈'), - (0x232A, 'M', '〉'), - (0x232B, 'V'), - (0x2427, 'X'), - (0x2440, 'V'), - (0x244B, 'X'), - (0x2460, 'M', '1'), - (0x2461, 'M', '2'), - (0x2462, 'M', '3'), - (0x2463, 'M', '4'), - (0x2464, 'M', '5'), - (0x2465, 'M', '6'), - (0x2466, 'M', '7'), - (0x2467, 'M', '8'), - (0x2468, 'M', '9'), - (0x2469, 'M', '10'), - (0x246A, 'M', '11'), - (0x246B, 'M', '12'), - (0x246C, 'M', '13'), - (0x246D, 'M', '14'), - (0x246E, 'M', '15'), - (0x246F, 'M', '16'), - (0x2470, 'M', '17'), - (0x2471, 'M', '18'), - (0x2472, 'M', '19'), - (0x2473, 'M', '20'), - (0x2474, '3', '(1)'), - (0x2475, '3', '(2)'), - (0x2476, '3', '(3)'), - (0x2477, '3', '(4)'), - (0x2478, '3', '(5)'), - (0x2479, '3', '(6)'), - (0x247A, '3', '(7)'), - (0x247B, '3', '(8)'), - (0x247C, '3', '(9)'), - (0x247D, '3', '(10)'), - (0x247E, '3', '(11)'), - (0x247F, '3', '(12)'), - (0x2480, '3', '(13)'), - (0x2481, '3', '(14)'), - (0x2482, '3', '(15)'), - (0x2483, '3', '(16)'), - (0x2484, '3', '(17)'), - (0x2485, '3', '(18)'), - (0x2486, '3', '(19)'), - (0x2487, '3', '(20)'), - (0x2488, 'X'), - (0x249C, '3', '(a)'), - (0x249D, '3', '(b)'), - (0x249E, '3', '(c)'), - (0x249F, '3', '(d)'), - (0x24A0, '3', '(e)'), - (0x24A1, '3', '(f)'), - (0x24A2, '3', '(g)'), - (0x24A3, '3', '(h)'), - (0x24A4, '3', '(i)'), - (0x24A5, '3', '(j)'), - (0x24A6, '3', '(k)'), - (0x24A7, '3', '(l)'), - (0x24A8, '3', '(m)'), - (0x24A9, '3', '(n)'), - (0x24AA, '3', '(o)'), - (0x24AB, '3', '(p)'), - (0x24AC, '3', '(q)'), - (0x24AD, '3', '(r)'), - (0x24AE, '3', '(s)'), - (0x24AF, '3', '(t)'), - (0x24B0, '3', '(u)'), - (0x24B1, '3', '(v)'), - (0x24B2, '3', '(w)'), - (0x24B3, '3', '(x)'), - (0x24B4, '3', '(y)'), - (0x24B5, '3', '(z)'), - (0x24B6, 'M', 'a'), - (0x24B7, 'M', 'b'), - (0x24B8, 'M', 'c'), - (0x24B9, 'M', 'd'), - (0x24BA, 'M', 'e'), - (0x24BB, 'M', 'f'), - (0x24BC, 'M', 'g'), - ] - -def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x24BD, 'M', 'h'), - (0x24BE, 'M', 'i'), - (0x24BF, 'M', 'j'), - (0x24C0, 'M', 'k'), - (0x24C1, 'M', 'l'), - (0x24C2, 'M', 'm'), - (0x24C3, 'M', 'n'), - (0x24C4, 'M', 'o'), - (0x24C5, 'M', 'p'), - (0x24C6, 'M', 'q'), - (0x24C7, 'M', 'r'), - (0x24C8, 'M', 's'), - (0x24C9, 'M', 't'), - (0x24CA, 'M', 'u'), - (0x24CB, 'M', 'v'), - (0x24CC, 'M', 'w'), - (0x24CD, 'M', 'x'), - (0x24CE, 'M', 'y'), - (0x24CF, 'M', 'z'), - (0x24D0, 'M', 'a'), - (0x24D1, 'M', 'b'), - (0x24D2, 'M', 'c'), - (0x24D3, 'M', 'd'), - (0x24D4, 'M', 'e'), - (0x24D5, 'M', 'f'), - (0x24D6, 'M', 'g'), - (0x24D7, 'M', 'h'), - (0x24D8, 'M', 'i'), - (0x24D9, 'M', 'j'), - (0x24DA, 'M', 'k'), - (0x24DB, 'M', 'l'), - (0x24DC, 'M', 'm'), - (0x24DD, 'M', 'n'), - (0x24DE, 'M', 'o'), - (0x24DF, 'M', 'p'), - (0x24E0, 'M', 'q'), - (0x24E1, 'M', 'r'), - (0x24E2, 'M', 's'), - (0x24E3, 'M', 't'), - (0x24E4, 'M', 'u'), - (0x24E5, 'M', 'v'), - (0x24E6, 'M', 'w'), - (0x24E7, 'M', 'x'), - (0x24E8, 'M', 'y'), - (0x24E9, 'M', 'z'), - (0x24EA, 'M', '0'), - (0x24EB, 'V'), - (0x2A0C, 'M', '∫∫∫∫'), - (0x2A0D, 'V'), - (0x2A74, '3', '::='), - (0x2A75, '3', '=='), - (0x2A76, '3', '==='), - 
(0x2A77, 'V'), - (0x2ADC, 'M', '⫝̸'), - (0x2ADD, 'V'), - (0x2B74, 'X'), - (0x2B76, 'V'), - (0x2B96, 'X'), - (0x2B97, 'V'), - (0x2C00, 'M', 'ⰰ'), - (0x2C01, 'M', 'ⰱ'), - (0x2C02, 'M', 'ⰲ'), - (0x2C03, 'M', 'ⰳ'), - (0x2C04, 'M', 'ⰴ'), - (0x2C05, 'M', 'ⰵ'), - (0x2C06, 'M', 'ⰶ'), - (0x2C07, 'M', 'ⰷ'), - (0x2C08, 'M', 'ⰸ'), - (0x2C09, 'M', 'ⰹ'), - (0x2C0A, 'M', 'ⰺ'), - (0x2C0B, 'M', 'ⰻ'), - (0x2C0C, 'M', 'ⰼ'), - (0x2C0D, 'M', 'ⰽ'), - (0x2C0E, 'M', 'ⰾ'), - (0x2C0F, 'M', 'ⰿ'), - (0x2C10, 'M', 'ⱀ'), - (0x2C11, 'M', 'ⱁ'), - (0x2C12, 'M', 'ⱂ'), - (0x2C13, 'M', 'ⱃ'), - (0x2C14, 'M', 'ⱄ'), - (0x2C15, 'M', 'ⱅ'), - (0x2C16, 'M', 'ⱆ'), - (0x2C17, 'M', 'ⱇ'), - (0x2C18, 'M', 'ⱈ'), - (0x2C19, 'M', 'ⱉ'), - (0x2C1A, 'M', 'ⱊ'), - (0x2C1B, 'M', 'ⱋ'), - (0x2C1C, 'M', 'ⱌ'), - (0x2C1D, 'M', 'ⱍ'), - (0x2C1E, 'M', 'ⱎ'), - (0x2C1F, 'M', 'ⱏ'), - (0x2C20, 'M', 'ⱐ'), - (0x2C21, 'M', 'ⱑ'), - (0x2C22, 'M', 'ⱒ'), - (0x2C23, 'M', 'ⱓ'), - (0x2C24, 'M', 'ⱔ'), - (0x2C25, 'M', 'ⱕ'), - (0x2C26, 'M', 'ⱖ'), - (0x2C27, 'M', 'ⱗ'), - (0x2C28, 'M', 'ⱘ'), - ] - -def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2C29, 'M', 'ⱙ'), - (0x2C2A, 'M', 'ⱚ'), - (0x2C2B, 'M', 'ⱛ'), - (0x2C2C, 'M', 'ⱜ'), - (0x2C2D, 'M', 'ⱝ'), - (0x2C2E, 'M', 'ⱞ'), - (0x2C2F, 'M', 'ⱟ'), - (0x2C30, 'V'), - (0x2C60, 'M', 'ⱡ'), - (0x2C61, 'V'), - (0x2C62, 'M', 'ɫ'), - (0x2C63, 'M', 'ᵽ'), - (0x2C64, 'M', 'ɽ'), - (0x2C65, 'V'), - (0x2C67, 'M', 'ⱨ'), - (0x2C68, 'V'), - (0x2C69, 'M', 'ⱪ'), - (0x2C6A, 'V'), - (0x2C6B, 'M', 'ⱬ'), - (0x2C6C, 'V'), - (0x2C6D, 'M', 'ɑ'), - (0x2C6E, 'M', 'ɱ'), - (0x2C6F, 'M', 'ɐ'), - (0x2C70, 'M', 'ɒ'), - (0x2C71, 'V'), - (0x2C72, 'M', 'ⱳ'), - (0x2C73, 'V'), - (0x2C75, 'M', 'ⱶ'), - (0x2C76, 'V'), - (0x2C7C, 'M', 'j'), - (0x2C7D, 'M', 'v'), - (0x2C7E, 'M', 'ȿ'), - (0x2C7F, 'M', 'ɀ'), - (0x2C80, 'M', 'ⲁ'), - (0x2C81, 'V'), - (0x2C82, 'M', 'ⲃ'), - (0x2C83, 'V'), - (0x2C84, 'M', 'ⲅ'), - (0x2C85, 'V'), - (0x2C86, 'M', 'ⲇ'), - (0x2C87, 'V'), - (0x2C88, 'M', 'ⲉ'), - (0x2C89, 'V'), - (0x2C8A, 'M', 'ⲋ'), - (0x2C8B, 'V'), - (0x2C8C, 'M', 'ⲍ'), - (0x2C8D, 'V'), - (0x2C8E, 'M', 'ⲏ'), - (0x2C8F, 'V'), - (0x2C90, 'M', 'ⲑ'), - (0x2C91, 'V'), - (0x2C92, 'M', 'ⲓ'), - (0x2C93, 'V'), - (0x2C94, 'M', 'ⲕ'), - (0x2C95, 'V'), - (0x2C96, 'M', 'ⲗ'), - (0x2C97, 'V'), - (0x2C98, 'M', 'ⲙ'), - (0x2C99, 'V'), - (0x2C9A, 'M', 'ⲛ'), - (0x2C9B, 'V'), - (0x2C9C, 'M', 'ⲝ'), - (0x2C9D, 'V'), - (0x2C9E, 'M', 'ⲟ'), - (0x2C9F, 'V'), - (0x2CA0, 'M', 'ⲡ'), - (0x2CA1, 'V'), - (0x2CA2, 'M', 'ⲣ'), - (0x2CA3, 'V'), - (0x2CA4, 'M', 'ⲥ'), - (0x2CA5, 'V'), - (0x2CA6, 'M', 'ⲧ'), - (0x2CA7, 'V'), - (0x2CA8, 'M', 'ⲩ'), - (0x2CA9, 'V'), - (0x2CAA, 'M', 'ⲫ'), - (0x2CAB, 'V'), - (0x2CAC, 'M', 'ⲭ'), - (0x2CAD, 'V'), - (0x2CAE, 'M', 'ⲯ'), - (0x2CAF, 'V'), - (0x2CB0, 'M', 'ⲱ'), - (0x2CB1, 'V'), - (0x2CB2, 'M', 'ⲳ'), - (0x2CB3, 'V'), - (0x2CB4, 'M', 'ⲵ'), - (0x2CB5, 'V'), - (0x2CB6, 'M', 'ⲷ'), - (0x2CB7, 'V'), - (0x2CB8, 'M', 'ⲹ'), - (0x2CB9, 'V'), - (0x2CBA, 'M', 'ⲻ'), - (0x2CBB, 'V'), - (0x2CBC, 'M', 'ⲽ'), - (0x2CBD, 'V'), - (0x2CBE, 'M', 'ⲿ'), - (0x2CBF, 'V'), - (0x2CC0, 'M', 'ⳁ'), - (0x2CC1, 'V'), - (0x2CC2, 'M', 'ⳃ'), - ] - -def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2CC3, 'V'), - (0x2CC4, 'M', 'ⳅ'), - (0x2CC5, 'V'), - (0x2CC6, 'M', 'ⳇ'), - (0x2CC7, 'V'), - (0x2CC8, 'M', 'ⳉ'), - (0x2CC9, 'V'), - (0x2CCA, 'M', 'ⳋ'), - (0x2CCB, 'V'), - (0x2CCC, 'M', 'ⳍ'), - (0x2CCD, 'V'), - (0x2CCE, 'M', 'ⳏ'), - (0x2CCF, 'V'), - (0x2CD0, 'M', 'ⳑ'), - (0x2CD1, 'V'), - (0x2CD2, 'M', 'ⳓ'), - (0x2CD3, 'V'), - (0x2CD4, 'M', 'ⳕ'), - (0x2CD5, 
'V'), - (0x2CD6, 'M', 'ⳗ'), - (0x2CD7, 'V'), - (0x2CD8, 'M', 'ⳙ'), - (0x2CD9, 'V'), - (0x2CDA, 'M', 'ⳛ'), - (0x2CDB, 'V'), - (0x2CDC, 'M', 'ⳝ'), - (0x2CDD, 'V'), - (0x2CDE, 'M', 'ⳟ'), - (0x2CDF, 'V'), - (0x2CE0, 'M', 'ⳡ'), - (0x2CE1, 'V'), - (0x2CE2, 'M', 'ⳣ'), - (0x2CE3, 'V'), - (0x2CEB, 'M', 'ⳬ'), - (0x2CEC, 'V'), - (0x2CED, 'M', 'ⳮ'), - (0x2CEE, 'V'), - (0x2CF2, 'M', 'ⳳ'), - (0x2CF3, 'V'), - (0x2CF4, 'X'), - (0x2CF9, 'V'), - (0x2D26, 'X'), - (0x2D27, 'V'), - (0x2D28, 'X'), - (0x2D2D, 'V'), - (0x2D2E, 'X'), - (0x2D30, 'V'), - (0x2D68, 'X'), - (0x2D6F, 'M', 'ⵡ'), - (0x2D70, 'V'), - (0x2D71, 'X'), - (0x2D7F, 'V'), - (0x2D97, 'X'), - (0x2DA0, 'V'), - (0x2DA7, 'X'), - (0x2DA8, 'V'), - (0x2DAF, 'X'), - (0x2DB0, 'V'), - (0x2DB7, 'X'), - (0x2DB8, 'V'), - (0x2DBF, 'X'), - (0x2DC0, 'V'), - (0x2DC7, 'X'), - (0x2DC8, 'V'), - (0x2DCF, 'X'), - (0x2DD0, 'V'), - (0x2DD7, 'X'), - (0x2DD8, 'V'), - (0x2DDF, 'X'), - (0x2DE0, 'V'), - (0x2E5E, 'X'), - (0x2E80, 'V'), - (0x2E9A, 'X'), - (0x2E9B, 'V'), - (0x2E9F, 'M', '母'), - (0x2EA0, 'V'), - (0x2EF3, 'M', '龟'), - (0x2EF4, 'X'), - (0x2F00, 'M', '一'), - (0x2F01, 'M', '丨'), - (0x2F02, 'M', '丶'), - (0x2F03, 'M', '丿'), - (0x2F04, 'M', '乙'), - (0x2F05, 'M', '亅'), - (0x2F06, 'M', '二'), - (0x2F07, 'M', '亠'), - (0x2F08, 'M', '人'), - (0x2F09, 'M', '儿'), - (0x2F0A, 'M', '入'), - (0x2F0B, 'M', '八'), - (0x2F0C, 'M', '冂'), - (0x2F0D, 'M', '冖'), - (0x2F0E, 'M', '冫'), - (0x2F0F, 'M', '几'), - (0x2F10, 'M', '凵'), - (0x2F11, 'M', '刀'), - (0x2F12, 'M', '力'), - (0x2F13, 'M', '勹'), - (0x2F14, 'M', '匕'), - (0x2F15, 'M', '匚'), - ] - -def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F16, 'M', '匸'), - (0x2F17, 'M', '十'), - (0x2F18, 'M', '卜'), - (0x2F19, 'M', '卩'), - (0x2F1A, 'M', '厂'), - (0x2F1B, 'M', '厶'), - (0x2F1C, 'M', '又'), - (0x2F1D, 'M', '口'), - (0x2F1E, 'M', '囗'), - (0x2F1F, 'M', '土'), - (0x2F20, 'M', '士'), - (0x2F21, 'M', '夂'), - (0x2F22, 'M', '夊'), - (0x2F23, 'M', '夕'), - (0x2F24, 'M', '大'), - (0x2F25, 'M', '女'), - (0x2F26, 'M', '子'), - (0x2F27, 'M', '宀'), - (0x2F28, 'M', '寸'), - (0x2F29, 'M', '小'), - (0x2F2A, 'M', '尢'), - (0x2F2B, 'M', '尸'), - (0x2F2C, 'M', '屮'), - (0x2F2D, 'M', '山'), - (0x2F2E, 'M', '巛'), - (0x2F2F, 'M', '工'), - (0x2F30, 'M', '己'), - (0x2F31, 'M', '巾'), - (0x2F32, 'M', '干'), - (0x2F33, 'M', '幺'), - (0x2F34, 'M', '广'), - (0x2F35, 'M', '廴'), - (0x2F36, 'M', '廾'), - (0x2F37, 'M', '弋'), - (0x2F38, 'M', '弓'), - (0x2F39, 'M', '彐'), - (0x2F3A, 'M', '彡'), - (0x2F3B, 'M', '彳'), - (0x2F3C, 'M', '心'), - (0x2F3D, 'M', '戈'), - (0x2F3E, 'M', '戶'), - (0x2F3F, 'M', '手'), - (0x2F40, 'M', '支'), - (0x2F41, 'M', '攴'), - (0x2F42, 'M', '文'), - (0x2F43, 'M', '斗'), - (0x2F44, 'M', '斤'), - (0x2F45, 'M', '方'), - (0x2F46, 'M', '无'), - (0x2F47, 'M', '日'), - (0x2F48, 'M', '曰'), - (0x2F49, 'M', '月'), - (0x2F4A, 'M', '木'), - (0x2F4B, 'M', '欠'), - (0x2F4C, 'M', '止'), - (0x2F4D, 'M', '歹'), - (0x2F4E, 'M', '殳'), - (0x2F4F, 'M', '毋'), - (0x2F50, 'M', '比'), - (0x2F51, 'M', '毛'), - (0x2F52, 'M', '氏'), - (0x2F53, 'M', '气'), - (0x2F54, 'M', '水'), - (0x2F55, 'M', '火'), - (0x2F56, 'M', '爪'), - (0x2F57, 'M', '父'), - (0x2F58, 'M', '爻'), - (0x2F59, 'M', '爿'), - (0x2F5A, 'M', '片'), - (0x2F5B, 'M', '牙'), - (0x2F5C, 'M', '牛'), - (0x2F5D, 'M', '犬'), - (0x2F5E, 'M', '玄'), - (0x2F5F, 'M', '玉'), - (0x2F60, 'M', '瓜'), - (0x2F61, 'M', '瓦'), - (0x2F62, 'M', '甘'), - (0x2F63, 'M', '生'), - (0x2F64, 'M', '用'), - (0x2F65, 'M', '田'), - (0x2F66, 'M', '疋'), - (0x2F67, 'M', '疒'), - (0x2F68, 'M', '癶'), - (0x2F69, 'M', '白'), - (0x2F6A, 'M', '皮'), - (0x2F6B, 'M', '皿'), - (0x2F6C, 'M', 
'目'), - (0x2F6D, 'M', '矛'), - (0x2F6E, 'M', '矢'), - (0x2F6F, 'M', '石'), - (0x2F70, 'M', '示'), - (0x2F71, 'M', '禸'), - (0x2F72, 'M', '禾'), - (0x2F73, 'M', '穴'), - (0x2F74, 'M', '立'), - (0x2F75, 'M', '竹'), - (0x2F76, 'M', '米'), - (0x2F77, 'M', '糸'), - (0x2F78, 'M', '缶'), - (0x2F79, 'M', '网'), - ] - -def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F7A, 'M', '羊'), - (0x2F7B, 'M', '羽'), - (0x2F7C, 'M', '老'), - (0x2F7D, 'M', '而'), - (0x2F7E, 'M', '耒'), - (0x2F7F, 'M', '耳'), - (0x2F80, 'M', '聿'), - (0x2F81, 'M', '肉'), - (0x2F82, 'M', '臣'), - (0x2F83, 'M', '自'), - (0x2F84, 'M', '至'), - (0x2F85, 'M', '臼'), - (0x2F86, 'M', '舌'), - (0x2F87, 'M', '舛'), - (0x2F88, 'M', '舟'), - (0x2F89, 'M', '艮'), - (0x2F8A, 'M', '色'), - (0x2F8B, 'M', '艸'), - (0x2F8C, 'M', '虍'), - (0x2F8D, 'M', '虫'), - (0x2F8E, 'M', '血'), - (0x2F8F, 'M', '行'), - (0x2F90, 'M', '衣'), - (0x2F91, 'M', '襾'), - (0x2F92, 'M', '見'), - (0x2F93, 'M', '角'), - (0x2F94, 'M', '言'), - (0x2F95, 'M', '谷'), - (0x2F96, 'M', '豆'), - (0x2F97, 'M', '豕'), - (0x2F98, 'M', '豸'), - (0x2F99, 'M', '貝'), - (0x2F9A, 'M', '赤'), - (0x2F9B, 'M', '走'), - (0x2F9C, 'M', '足'), - (0x2F9D, 'M', '身'), - (0x2F9E, 'M', '車'), - (0x2F9F, 'M', '辛'), - (0x2FA0, 'M', '辰'), - (0x2FA1, 'M', '辵'), - (0x2FA2, 'M', '邑'), - (0x2FA3, 'M', '酉'), - (0x2FA4, 'M', '釆'), - (0x2FA5, 'M', '里'), - (0x2FA6, 'M', '金'), - (0x2FA7, 'M', '長'), - (0x2FA8, 'M', '門'), - (0x2FA9, 'M', '阜'), - (0x2FAA, 'M', '隶'), - (0x2FAB, 'M', '隹'), - (0x2FAC, 'M', '雨'), - (0x2FAD, 'M', '靑'), - (0x2FAE, 'M', '非'), - (0x2FAF, 'M', '面'), - (0x2FB0, 'M', '革'), - (0x2FB1, 'M', '韋'), - (0x2FB2, 'M', '韭'), - (0x2FB3, 'M', '音'), - (0x2FB4, 'M', '頁'), - (0x2FB5, 'M', '風'), - (0x2FB6, 'M', '飛'), - (0x2FB7, 'M', '食'), - (0x2FB8, 'M', '首'), - (0x2FB9, 'M', '香'), - (0x2FBA, 'M', '馬'), - (0x2FBB, 'M', '骨'), - (0x2FBC, 'M', '高'), - (0x2FBD, 'M', '髟'), - (0x2FBE, 'M', '鬥'), - (0x2FBF, 'M', '鬯'), - (0x2FC0, 'M', '鬲'), - (0x2FC1, 'M', '鬼'), - (0x2FC2, 'M', '魚'), - (0x2FC3, 'M', '鳥'), - (0x2FC4, 'M', '鹵'), - (0x2FC5, 'M', '鹿'), - (0x2FC6, 'M', '麥'), - (0x2FC7, 'M', '麻'), - (0x2FC8, 'M', '黃'), - (0x2FC9, 'M', '黍'), - (0x2FCA, 'M', '黑'), - (0x2FCB, 'M', '黹'), - (0x2FCC, 'M', '黽'), - (0x2FCD, 'M', '鼎'), - (0x2FCE, 'M', '鼓'), - (0x2FCF, 'M', '鼠'), - (0x2FD0, 'M', '鼻'), - (0x2FD1, 'M', '齊'), - (0x2FD2, 'M', '齒'), - (0x2FD3, 'M', '龍'), - (0x2FD4, 'M', '龜'), - (0x2FD5, 'M', '龠'), - (0x2FD6, 'X'), - (0x3000, '3', ' '), - (0x3001, 'V'), - (0x3002, 'M', '.'), - (0x3003, 'V'), - (0x3036, 'M', '〒'), - (0x3037, 'V'), - (0x3038, 'M', '十'), - ] - -def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3039, 'M', '卄'), - (0x303A, 'M', '卅'), - (0x303B, 'V'), - (0x3040, 'X'), - (0x3041, 'V'), - (0x3097, 'X'), - (0x3099, 'V'), - (0x309B, '3', ' ゙'), - (0x309C, '3', ' ゚'), - (0x309D, 'V'), - (0x309F, 'M', 'より'), - (0x30A0, 'V'), - (0x30FF, 'M', 'コト'), - (0x3100, 'X'), - (0x3105, 'V'), - (0x3130, 'X'), - (0x3131, 'M', 'ᄀ'), - (0x3132, 'M', 'ᄁ'), - (0x3133, 'M', 'ᆪ'), - (0x3134, 'M', 'ᄂ'), - (0x3135, 'M', 'ᆬ'), - (0x3136, 'M', 'ᆭ'), - (0x3137, 'M', 'ᄃ'), - (0x3138, 'M', 'ᄄ'), - (0x3139, 'M', 'ᄅ'), - (0x313A, 'M', 'ᆰ'), - (0x313B, 'M', 'ᆱ'), - (0x313C, 'M', 'ᆲ'), - (0x313D, 'M', 'ᆳ'), - (0x313E, 'M', 'ᆴ'), - (0x313F, 'M', 'ᆵ'), - (0x3140, 'M', 'ᄚ'), - (0x3141, 'M', 'ᄆ'), - (0x3142, 'M', 'ᄇ'), - (0x3143, 'M', 'ᄈ'), - (0x3144, 'M', 'ᄡ'), - (0x3145, 'M', 'ᄉ'), - (0x3146, 'M', 'ᄊ'), - (0x3147, 'M', 'ᄋ'), - (0x3148, 'M', 'ᄌ'), - (0x3149, 'M', 'ᄍ'), - (0x314A, 'M', 'ᄎ'), - (0x314B, 'M', 'ᄏ'), - 
(0x314C, 'M', 'ᄐ'), - (0x314D, 'M', 'ᄑ'), - (0x314E, 'M', 'ᄒ'), - (0x314F, 'M', 'ᅡ'), - (0x3150, 'M', 'ᅢ'), - (0x3151, 'M', 'ᅣ'), - (0x3152, 'M', 'ᅤ'), - (0x3153, 'M', 'ᅥ'), - (0x3154, 'M', 'ᅦ'), - (0x3155, 'M', 'ᅧ'), - (0x3156, 'M', 'ᅨ'), - (0x3157, 'M', 'ᅩ'), - (0x3158, 'M', 'ᅪ'), - (0x3159, 'M', 'ᅫ'), - (0x315A, 'M', 'ᅬ'), - (0x315B, 'M', 'ᅭ'), - (0x315C, 'M', 'ᅮ'), - (0x315D, 'M', 'ᅯ'), - (0x315E, 'M', 'ᅰ'), - (0x315F, 'M', 'ᅱ'), - (0x3160, 'M', 'ᅲ'), - (0x3161, 'M', 'ᅳ'), - (0x3162, 'M', 'ᅴ'), - (0x3163, 'M', 'ᅵ'), - (0x3164, 'X'), - (0x3165, 'M', 'ᄔ'), - (0x3166, 'M', 'ᄕ'), - (0x3167, 'M', 'ᇇ'), - (0x3168, 'M', 'ᇈ'), - (0x3169, 'M', 'ᇌ'), - (0x316A, 'M', 'ᇎ'), - (0x316B, 'M', 'ᇓ'), - (0x316C, 'M', 'ᇗ'), - (0x316D, 'M', 'ᇙ'), - (0x316E, 'M', 'ᄜ'), - (0x316F, 'M', 'ᇝ'), - (0x3170, 'M', 'ᇟ'), - (0x3171, 'M', 'ᄝ'), - (0x3172, 'M', 'ᄞ'), - (0x3173, 'M', 'ᄠ'), - (0x3174, 'M', 'ᄢ'), - (0x3175, 'M', 'ᄣ'), - (0x3176, 'M', 'ᄧ'), - (0x3177, 'M', 'ᄩ'), - (0x3178, 'M', 'ᄫ'), - (0x3179, 'M', 'ᄬ'), - (0x317A, 'M', 'ᄭ'), - (0x317B, 'M', 'ᄮ'), - (0x317C, 'M', 'ᄯ'), - (0x317D, 'M', 'ᄲ'), - (0x317E, 'M', 'ᄶ'), - (0x317F, 'M', 'ᅀ'), - (0x3180, 'M', 'ᅇ'), - (0x3181, 'M', 'ᅌ'), - (0x3182, 'M', 'ᇱ'), - (0x3183, 'M', 'ᇲ'), - (0x3184, 'M', 'ᅗ'), - ] - -def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3185, 'M', 'ᅘ'), - (0x3186, 'M', 'ᅙ'), - (0x3187, 'M', 'ᆄ'), - (0x3188, 'M', 'ᆅ'), - (0x3189, 'M', 'ᆈ'), - (0x318A, 'M', 'ᆑ'), - (0x318B, 'M', 'ᆒ'), - (0x318C, 'M', 'ᆔ'), - (0x318D, 'M', 'ᆞ'), - (0x318E, 'M', 'ᆡ'), - (0x318F, 'X'), - (0x3190, 'V'), - (0x3192, 'M', '一'), - (0x3193, 'M', '二'), - (0x3194, 'M', '三'), - (0x3195, 'M', '四'), - (0x3196, 'M', '上'), - (0x3197, 'M', '中'), - (0x3198, 'M', '下'), - (0x3199, 'M', '甲'), - (0x319A, 'M', '乙'), - (0x319B, 'M', '丙'), - (0x319C, 'M', '丁'), - (0x319D, 'M', '天'), - (0x319E, 'M', '地'), - (0x319F, 'M', '人'), - (0x31A0, 'V'), - (0x31E4, 'X'), - (0x31F0, 'V'), - (0x3200, '3', '(ᄀ)'), - (0x3201, '3', '(ᄂ)'), - (0x3202, '3', '(ᄃ)'), - (0x3203, '3', '(ᄅ)'), - (0x3204, '3', '(ᄆ)'), - (0x3205, '3', '(ᄇ)'), - (0x3206, '3', '(ᄉ)'), - (0x3207, '3', '(ᄋ)'), - (0x3208, '3', '(ᄌ)'), - (0x3209, '3', '(ᄎ)'), - (0x320A, '3', '(ᄏ)'), - (0x320B, '3', '(ᄐ)'), - (0x320C, '3', '(ᄑ)'), - (0x320D, '3', '(ᄒ)'), - (0x320E, '3', '(가)'), - (0x320F, '3', '(나)'), - (0x3210, '3', '(다)'), - (0x3211, '3', '(라)'), - (0x3212, '3', '(마)'), - (0x3213, '3', '(바)'), - (0x3214, '3', '(사)'), - (0x3215, '3', '(아)'), - (0x3216, '3', '(자)'), - (0x3217, '3', '(차)'), - (0x3218, '3', '(카)'), - (0x3219, '3', '(타)'), - (0x321A, '3', '(파)'), - (0x321B, '3', '(하)'), - (0x321C, '3', '(주)'), - (0x321D, '3', '(오전)'), - (0x321E, '3', '(오후)'), - (0x321F, 'X'), - (0x3220, '3', '(一)'), - (0x3221, '3', '(二)'), - (0x3222, '3', '(三)'), - (0x3223, '3', '(四)'), - (0x3224, '3', '(五)'), - (0x3225, '3', '(六)'), - (0x3226, '3', '(七)'), - (0x3227, '3', '(八)'), - (0x3228, '3', '(九)'), - (0x3229, '3', '(十)'), - (0x322A, '3', '(月)'), - (0x322B, '3', '(火)'), - (0x322C, '3', '(水)'), - (0x322D, '3', '(木)'), - (0x322E, '3', '(金)'), - (0x322F, '3', '(土)'), - (0x3230, '3', '(日)'), - (0x3231, '3', '(株)'), - (0x3232, '3', '(有)'), - (0x3233, '3', '(社)'), - (0x3234, '3', '(名)'), - (0x3235, '3', '(特)'), - (0x3236, '3', '(財)'), - (0x3237, '3', '(祝)'), - (0x3238, '3', '(労)'), - (0x3239, '3', '(代)'), - (0x323A, '3', '(呼)'), - (0x323B, '3', '(学)'), - (0x323C, '3', '(監)'), - (0x323D, '3', '(企)'), - (0x323E, '3', '(資)'), - (0x323F, '3', '(協)'), - (0x3240, '3', '(祭)'), - (0x3241, '3', '(休)'), - (0x3242, '3', '(自)'), - 
(0x3243, '3', '(至)'), - (0x3244, 'M', '問'), - (0x3245, 'M', '幼'), - (0x3246, 'M', '文'), - ] - -def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3247, 'M', '箏'), - (0x3248, 'V'), - (0x3250, 'M', 'pte'), - (0x3251, 'M', '21'), - (0x3252, 'M', '22'), - (0x3253, 'M', '23'), - (0x3254, 'M', '24'), - (0x3255, 'M', '25'), - (0x3256, 'M', '26'), - (0x3257, 'M', '27'), - (0x3258, 'M', '28'), - (0x3259, 'M', '29'), - (0x325A, 'M', '30'), - (0x325B, 'M', '31'), - (0x325C, 'M', '32'), - (0x325D, 'M', '33'), - (0x325E, 'M', '34'), - (0x325F, 'M', '35'), - (0x3260, 'M', 'ᄀ'), - (0x3261, 'M', 'ᄂ'), - (0x3262, 'M', 'ᄃ'), - (0x3263, 'M', 'ᄅ'), - (0x3264, 'M', 'ᄆ'), - (0x3265, 'M', 'ᄇ'), - (0x3266, 'M', 'ᄉ'), - (0x3267, 'M', 'ᄋ'), - (0x3268, 'M', 'ᄌ'), - (0x3269, 'M', 'ᄎ'), - (0x326A, 'M', 'ᄏ'), - (0x326B, 'M', 'ᄐ'), - (0x326C, 'M', 'ᄑ'), - (0x326D, 'M', 'ᄒ'), - (0x326E, 'M', '가'), - (0x326F, 'M', '나'), - (0x3270, 'M', '다'), - (0x3271, 'M', '라'), - (0x3272, 'M', '마'), - (0x3273, 'M', '바'), - (0x3274, 'M', '사'), - (0x3275, 'M', '아'), - (0x3276, 'M', '자'), - (0x3277, 'M', '차'), - (0x3278, 'M', '카'), - (0x3279, 'M', '타'), - (0x327A, 'M', '파'), - (0x327B, 'M', '하'), - (0x327C, 'M', '참고'), - (0x327D, 'M', '주의'), - (0x327E, 'M', '우'), - (0x327F, 'V'), - (0x3280, 'M', '一'), - (0x3281, 'M', '二'), - (0x3282, 'M', '三'), - (0x3283, 'M', '四'), - (0x3284, 'M', '五'), - (0x3285, 'M', '六'), - (0x3286, 'M', '七'), - (0x3287, 'M', '八'), - (0x3288, 'M', '九'), - (0x3289, 'M', '十'), - (0x328A, 'M', '月'), - (0x328B, 'M', '火'), - (0x328C, 'M', '水'), - (0x328D, 'M', '木'), - (0x328E, 'M', '金'), - (0x328F, 'M', '土'), - (0x3290, 'M', '日'), - (0x3291, 'M', '株'), - (0x3292, 'M', '有'), - (0x3293, 'M', '社'), - (0x3294, 'M', '名'), - (0x3295, 'M', '特'), - (0x3296, 'M', '財'), - (0x3297, 'M', '祝'), - (0x3298, 'M', '労'), - (0x3299, 'M', '秘'), - (0x329A, 'M', '男'), - (0x329B, 'M', '女'), - (0x329C, 'M', '適'), - (0x329D, 'M', '優'), - (0x329E, 'M', '印'), - (0x329F, 'M', '注'), - (0x32A0, 'M', '項'), - (0x32A1, 'M', '休'), - (0x32A2, 'M', '写'), - (0x32A3, 'M', '正'), - (0x32A4, 'M', '上'), - (0x32A5, 'M', '中'), - (0x32A6, 'M', '下'), - (0x32A7, 'M', '左'), - (0x32A8, 'M', '右'), - (0x32A9, 'M', '医'), - (0x32AA, 'M', '宗'), - (0x32AB, 'M', '学'), - (0x32AC, 'M', '監'), - (0x32AD, 'M', '企'), - (0x32AE, 'M', '資'), - (0x32AF, 'M', '協'), - (0x32B0, 'M', '夜'), - (0x32B1, 'M', '36'), - ] - -def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x32B2, 'M', '37'), - (0x32B3, 'M', '38'), - (0x32B4, 'M', '39'), - (0x32B5, 'M', '40'), - (0x32B6, 'M', '41'), - (0x32B7, 'M', '42'), - (0x32B8, 'M', '43'), - (0x32B9, 'M', '44'), - (0x32BA, 'M', '45'), - (0x32BB, 'M', '46'), - (0x32BC, 'M', '47'), - (0x32BD, 'M', '48'), - (0x32BE, 'M', '49'), - (0x32BF, 'M', '50'), - (0x32C0, 'M', '1月'), - (0x32C1, 'M', '2月'), - (0x32C2, 'M', '3月'), - (0x32C3, 'M', '4月'), - (0x32C4, 'M', '5月'), - (0x32C5, 'M', '6月'), - (0x32C6, 'M', '7月'), - (0x32C7, 'M', '8月'), - (0x32C8, 'M', '9月'), - (0x32C9, 'M', '10月'), - (0x32CA, 'M', '11月'), - (0x32CB, 'M', '12月'), - (0x32CC, 'M', 'hg'), - (0x32CD, 'M', 'erg'), - (0x32CE, 'M', 'ev'), - (0x32CF, 'M', 'ltd'), - (0x32D0, 'M', 'ア'), - (0x32D1, 'M', 'イ'), - (0x32D2, 'M', 'ウ'), - (0x32D3, 'M', 'エ'), - (0x32D4, 'M', 'オ'), - (0x32D5, 'M', 'カ'), - (0x32D6, 'M', 'キ'), - (0x32D7, 'M', 'ク'), - (0x32D8, 'M', 'ケ'), - (0x32D9, 'M', 'コ'), - (0x32DA, 'M', 'サ'), - (0x32DB, 'M', 'シ'), - (0x32DC, 'M', 'ス'), - (0x32DD, 'M', 'セ'), - (0x32DE, 'M', 'ソ'), - (0x32DF, 'M', 'タ'), - (0x32E0, 'M', 'チ'), - (0x32E1, 
'M', 'ツ'), - (0x32E2, 'M', 'テ'), - (0x32E3, 'M', 'ト'), - (0x32E4, 'M', 'ナ'), - (0x32E5, 'M', 'ニ'), - (0x32E6, 'M', 'ヌ'), - (0x32E7, 'M', 'ネ'), - (0x32E8, 'M', 'ノ'), - (0x32E9, 'M', 'ハ'), - (0x32EA, 'M', 'ヒ'), - (0x32EB, 'M', 'フ'), - (0x32EC, 'M', 'ヘ'), - (0x32ED, 'M', 'ホ'), - (0x32EE, 'M', 'マ'), - (0x32EF, 'M', 'ミ'), - (0x32F0, 'M', 'ム'), - (0x32F1, 'M', 'メ'), - (0x32F2, 'M', 'モ'), - (0x32F3, 'M', 'ヤ'), - (0x32F4, 'M', 'ユ'), - (0x32F5, 'M', 'ヨ'), - (0x32F6, 'M', 'ラ'), - (0x32F7, 'M', 'リ'), - (0x32F8, 'M', 'ル'), - (0x32F9, 'M', 'レ'), - (0x32FA, 'M', 'ロ'), - (0x32FB, 'M', 'ワ'), - (0x32FC, 'M', 'ヰ'), - (0x32FD, 'M', 'ヱ'), - (0x32FE, 'M', 'ヲ'), - (0x32FF, 'M', '令和'), - (0x3300, 'M', 'アパート'), - (0x3301, 'M', 'アルファ'), - (0x3302, 'M', 'アンペア'), - (0x3303, 'M', 'アール'), - (0x3304, 'M', 'イニング'), - (0x3305, 'M', 'インチ'), - (0x3306, 'M', 'ウォン'), - (0x3307, 'M', 'エスクード'), - (0x3308, 'M', 'エーカー'), - (0x3309, 'M', 'オンス'), - (0x330A, 'M', 'オーム'), - (0x330B, 'M', 'カイリ'), - (0x330C, 'M', 'カラット'), - (0x330D, 'M', 'カロリー'), - (0x330E, 'M', 'ガロン'), - (0x330F, 'M', 'ガンマ'), - (0x3310, 'M', 'ギガ'), - (0x3311, 'M', 'ギニー'), - (0x3312, 'M', 'キュリー'), - (0x3313, 'M', 'ギルダー'), - (0x3314, 'M', 'キロ'), - (0x3315, 'M', 'キログラム'), - ] - -def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3316, 'M', 'キロメートル'), - (0x3317, 'M', 'キロワット'), - (0x3318, 'M', 'グラム'), - (0x3319, 'M', 'グラムトン'), - (0x331A, 'M', 'クルゼイロ'), - (0x331B, 'M', 'クローネ'), - (0x331C, 'M', 'ケース'), - (0x331D, 'M', 'コルナ'), - (0x331E, 'M', 'コーポ'), - (0x331F, 'M', 'サイクル'), - (0x3320, 'M', 'サンチーム'), - (0x3321, 'M', 'シリング'), - (0x3322, 'M', 'センチ'), - (0x3323, 'M', 'セント'), - (0x3324, 'M', 'ダース'), - (0x3325, 'M', 'デシ'), - (0x3326, 'M', 'ドル'), - (0x3327, 'M', 'トン'), - (0x3328, 'M', 'ナノ'), - (0x3329, 'M', 'ノット'), - (0x332A, 'M', 'ハイツ'), - (0x332B, 'M', 'パーセント'), - (0x332C, 'M', 'パーツ'), - (0x332D, 'M', 'バーレル'), - (0x332E, 'M', 'ピアストル'), - (0x332F, 'M', 'ピクル'), - (0x3330, 'M', 'ピコ'), - (0x3331, 'M', 'ビル'), - (0x3332, 'M', 'ファラッド'), - (0x3333, 'M', 'フィート'), - (0x3334, 'M', 'ブッシェル'), - (0x3335, 'M', 'フラン'), - (0x3336, 'M', 'ヘクタール'), - (0x3337, 'M', 'ペソ'), - (0x3338, 'M', 'ペニヒ'), - (0x3339, 'M', 'ヘルツ'), - (0x333A, 'M', 'ペンス'), - (0x333B, 'M', 'ページ'), - (0x333C, 'M', 'ベータ'), - (0x333D, 'M', 'ポイント'), - (0x333E, 'M', 'ボルト'), - (0x333F, 'M', 'ホン'), - (0x3340, 'M', 'ポンド'), - (0x3341, 'M', 'ホール'), - (0x3342, 'M', 'ホーン'), - (0x3343, 'M', 'マイクロ'), - (0x3344, 'M', 'マイル'), - (0x3345, 'M', 'マッハ'), - (0x3346, 'M', 'マルク'), - (0x3347, 'M', 'マンション'), - (0x3348, 'M', 'ミクロン'), - (0x3349, 'M', 'ミリ'), - (0x334A, 'M', 'ミリバール'), - (0x334B, 'M', 'メガ'), - (0x334C, 'M', 'メガトン'), - (0x334D, 'M', 'メートル'), - (0x334E, 'M', 'ヤード'), - (0x334F, 'M', 'ヤール'), - (0x3350, 'M', 'ユアン'), - (0x3351, 'M', 'リットル'), - (0x3352, 'M', 'リラ'), - (0x3353, 'M', 'ルピー'), - (0x3354, 'M', 'ルーブル'), - (0x3355, 'M', 'レム'), - (0x3356, 'M', 'レントゲン'), - (0x3357, 'M', 'ワット'), - (0x3358, 'M', '0点'), - (0x3359, 'M', '1点'), - (0x335A, 'M', '2点'), - (0x335B, 'M', '3点'), - (0x335C, 'M', '4点'), - (0x335D, 'M', '5点'), - (0x335E, 'M', '6点'), - (0x335F, 'M', '7点'), - (0x3360, 'M', '8点'), - (0x3361, 'M', '9点'), - (0x3362, 'M', '10点'), - (0x3363, 'M', '11点'), - (0x3364, 'M', '12点'), - (0x3365, 'M', '13点'), - (0x3366, 'M', '14点'), - (0x3367, 'M', '15点'), - (0x3368, 'M', '16点'), - (0x3369, 'M', '17点'), - (0x336A, 'M', '18点'), - (0x336B, 'M', '19点'), - (0x336C, 'M', '20点'), - (0x336D, 'M', '21点'), - (0x336E, 'M', '22点'), - (0x336F, 'M', '23点'), - (0x3370, 'M', '24点'), - (0x3371, 'M', 'hpa'), - (0x3372, 'M', 'da'), - 
(0x3373, 'M', 'au'), - (0x3374, 'M', 'bar'), - (0x3375, 'M', 'ov'), - (0x3376, 'M', 'pc'), - (0x3377, 'M', 'dm'), - (0x3378, 'M', 'dm2'), - (0x3379, 'M', 'dm3'), - ] - -def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x337A, 'M', 'iu'), - (0x337B, 'M', '平成'), - (0x337C, 'M', '昭和'), - (0x337D, 'M', '大正'), - (0x337E, 'M', '明治'), - (0x337F, 'M', '株式会社'), - (0x3380, 'M', 'pa'), - (0x3381, 'M', 'na'), - (0x3382, 'M', 'μa'), - (0x3383, 'M', 'ma'), - (0x3384, 'M', 'ka'), - (0x3385, 'M', 'kb'), - (0x3386, 'M', 'mb'), - (0x3387, 'M', 'gb'), - (0x3388, 'M', 'cal'), - (0x3389, 'M', 'kcal'), - (0x338A, 'M', 'pf'), - (0x338B, 'M', 'nf'), - (0x338C, 'M', 'μf'), - (0x338D, 'M', 'μg'), - (0x338E, 'M', 'mg'), - (0x338F, 'M', 'kg'), - (0x3390, 'M', 'hz'), - (0x3391, 'M', 'khz'), - (0x3392, 'M', 'mhz'), - (0x3393, 'M', 'ghz'), - (0x3394, 'M', 'thz'), - (0x3395, 'M', 'μl'), - (0x3396, 'M', 'ml'), - (0x3397, 'M', 'dl'), - (0x3398, 'M', 'kl'), - (0x3399, 'M', 'fm'), - (0x339A, 'M', 'nm'), - (0x339B, 'M', 'μm'), - (0x339C, 'M', 'mm'), - (0x339D, 'M', 'cm'), - (0x339E, 'M', 'km'), - (0x339F, 'M', 'mm2'), - (0x33A0, 'M', 'cm2'), - (0x33A1, 'M', 'm2'), - (0x33A2, 'M', 'km2'), - (0x33A3, 'M', 'mm3'), - (0x33A4, 'M', 'cm3'), - (0x33A5, 'M', 'm3'), - (0x33A6, 'M', 'km3'), - (0x33A7, 'M', 'm∕s'), - (0x33A8, 'M', 'm∕s2'), - (0x33A9, 'M', 'pa'), - (0x33AA, 'M', 'kpa'), - (0x33AB, 'M', 'mpa'), - (0x33AC, 'M', 'gpa'), - (0x33AD, 'M', 'rad'), - (0x33AE, 'M', 'rad∕s'), - (0x33AF, 'M', 'rad∕s2'), - (0x33B0, 'M', 'ps'), - (0x33B1, 'M', 'ns'), - (0x33B2, 'M', 'μs'), - (0x33B3, 'M', 'ms'), - (0x33B4, 'M', 'pv'), - (0x33B5, 'M', 'nv'), - (0x33B6, 'M', 'μv'), - (0x33B7, 'M', 'mv'), - (0x33B8, 'M', 'kv'), - (0x33B9, 'M', 'mv'), - (0x33BA, 'M', 'pw'), - (0x33BB, 'M', 'nw'), - (0x33BC, 'M', 'μw'), - (0x33BD, 'M', 'mw'), - (0x33BE, 'M', 'kw'), - (0x33BF, 'M', 'mw'), - (0x33C0, 'M', 'kω'), - (0x33C1, 'M', 'mω'), - (0x33C2, 'X'), - (0x33C3, 'M', 'bq'), - (0x33C4, 'M', 'cc'), - (0x33C5, 'M', 'cd'), - (0x33C6, 'M', 'c∕kg'), - (0x33C7, 'X'), - (0x33C8, 'M', 'db'), - (0x33C9, 'M', 'gy'), - (0x33CA, 'M', 'ha'), - (0x33CB, 'M', 'hp'), - (0x33CC, 'M', 'in'), - (0x33CD, 'M', 'kk'), - (0x33CE, 'M', 'km'), - (0x33CF, 'M', 'kt'), - (0x33D0, 'M', 'lm'), - (0x33D1, 'M', 'ln'), - (0x33D2, 'M', 'log'), - (0x33D3, 'M', 'lx'), - (0x33D4, 'M', 'mb'), - (0x33D5, 'M', 'mil'), - (0x33D6, 'M', 'mol'), - (0x33D7, 'M', 'ph'), - (0x33D8, 'X'), - (0x33D9, 'M', 'ppm'), - (0x33DA, 'M', 'pr'), - (0x33DB, 'M', 'sr'), - (0x33DC, 'M', 'sv'), - (0x33DD, 'M', 'wb'), - ] - -def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x33DE, 'M', 'v∕m'), - (0x33DF, 'M', 'a∕m'), - (0x33E0, 'M', '1日'), - (0x33E1, 'M', '2日'), - (0x33E2, 'M', '3日'), - (0x33E3, 'M', '4日'), - (0x33E4, 'M', '5日'), - (0x33E5, 'M', '6日'), - (0x33E6, 'M', '7日'), - (0x33E7, 'M', '8日'), - (0x33E8, 'M', '9日'), - (0x33E9, 'M', '10日'), - (0x33EA, 'M', '11日'), - (0x33EB, 'M', '12日'), - (0x33EC, 'M', '13日'), - (0x33ED, 'M', '14日'), - (0x33EE, 'M', '15日'), - (0x33EF, 'M', '16日'), - (0x33F0, 'M', '17日'), - (0x33F1, 'M', '18日'), - (0x33F2, 'M', '19日'), - (0x33F3, 'M', '20日'), - (0x33F4, 'M', '21日'), - (0x33F5, 'M', '22日'), - (0x33F6, 'M', '23日'), - (0x33F7, 'M', '24日'), - (0x33F8, 'M', '25日'), - (0x33F9, 'M', '26日'), - (0x33FA, 'M', '27日'), - (0x33FB, 'M', '28日'), - (0x33FC, 'M', '29日'), - (0x33FD, 'M', '30日'), - (0x33FE, 'M', '31日'), - (0x33FF, 'M', 'gal'), - (0x3400, 'V'), - (0xA48D, 'X'), - (0xA490, 'V'), - (0xA4C7, 'X'), - (0xA4D0, 'V'), - 
(0xA62C, 'X'), - (0xA640, 'M', 'ꙁ'), - (0xA641, 'V'), - (0xA642, 'M', 'ꙃ'), - (0xA643, 'V'), - (0xA644, 'M', 'ꙅ'), - (0xA645, 'V'), - (0xA646, 'M', 'ꙇ'), - (0xA647, 'V'), - (0xA648, 'M', 'ꙉ'), - (0xA649, 'V'), - (0xA64A, 'M', 'ꙋ'), - (0xA64B, 'V'), - (0xA64C, 'M', 'ꙍ'), - (0xA64D, 'V'), - (0xA64E, 'M', 'ꙏ'), - (0xA64F, 'V'), - (0xA650, 'M', 'ꙑ'), - (0xA651, 'V'), - (0xA652, 'M', 'ꙓ'), - (0xA653, 'V'), - (0xA654, 'M', 'ꙕ'), - (0xA655, 'V'), - (0xA656, 'M', 'ꙗ'), - (0xA657, 'V'), - (0xA658, 'M', 'ꙙ'), - (0xA659, 'V'), - (0xA65A, 'M', 'ꙛ'), - (0xA65B, 'V'), - (0xA65C, 'M', 'ꙝ'), - (0xA65D, 'V'), - (0xA65E, 'M', 'ꙟ'), - (0xA65F, 'V'), - (0xA660, 'M', 'ꙡ'), - (0xA661, 'V'), - (0xA662, 'M', 'ꙣ'), - (0xA663, 'V'), - (0xA664, 'M', 'ꙥ'), - (0xA665, 'V'), - (0xA666, 'M', 'ꙧ'), - (0xA667, 'V'), - (0xA668, 'M', 'ꙩ'), - (0xA669, 'V'), - (0xA66A, 'M', 'ꙫ'), - (0xA66B, 'V'), - (0xA66C, 'M', 'ꙭ'), - (0xA66D, 'V'), - (0xA680, 'M', 'ꚁ'), - (0xA681, 'V'), - (0xA682, 'M', 'ꚃ'), - (0xA683, 'V'), - (0xA684, 'M', 'ꚅ'), - (0xA685, 'V'), - (0xA686, 'M', 'ꚇ'), - (0xA687, 'V'), - (0xA688, 'M', 'ꚉ'), - (0xA689, 'V'), - (0xA68A, 'M', 'ꚋ'), - (0xA68B, 'V'), - (0xA68C, 'M', 'ꚍ'), - (0xA68D, 'V'), - ] - -def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA68E, 'M', 'ꚏ'), - (0xA68F, 'V'), - (0xA690, 'M', 'ꚑ'), - (0xA691, 'V'), - (0xA692, 'M', 'ꚓ'), - (0xA693, 'V'), - (0xA694, 'M', 'ꚕ'), - (0xA695, 'V'), - (0xA696, 'M', 'ꚗ'), - (0xA697, 'V'), - (0xA698, 'M', 'ꚙ'), - (0xA699, 'V'), - (0xA69A, 'M', 'ꚛ'), - (0xA69B, 'V'), - (0xA69C, 'M', 'ъ'), - (0xA69D, 'M', 'ь'), - (0xA69E, 'V'), - (0xA6F8, 'X'), - (0xA700, 'V'), - (0xA722, 'M', 'ꜣ'), - (0xA723, 'V'), - (0xA724, 'M', 'ꜥ'), - (0xA725, 'V'), - (0xA726, 'M', 'ꜧ'), - (0xA727, 'V'), - (0xA728, 'M', 'ꜩ'), - (0xA729, 'V'), - (0xA72A, 'M', 'ꜫ'), - (0xA72B, 'V'), - (0xA72C, 'M', 'ꜭ'), - (0xA72D, 'V'), - (0xA72E, 'M', 'ꜯ'), - (0xA72F, 'V'), - (0xA732, 'M', 'ꜳ'), - (0xA733, 'V'), - (0xA734, 'M', 'ꜵ'), - (0xA735, 'V'), - (0xA736, 'M', 'ꜷ'), - (0xA737, 'V'), - (0xA738, 'M', 'ꜹ'), - (0xA739, 'V'), - (0xA73A, 'M', 'ꜻ'), - (0xA73B, 'V'), - (0xA73C, 'M', 'ꜽ'), - (0xA73D, 'V'), - (0xA73E, 'M', 'ꜿ'), - (0xA73F, 'V'), - (0xA740, 'M', 'ꝁ'), - (0xA741, 'V'), - (0xA742, 'M', 'ꝃ'), - (0xA743, 'V'), - (0xA744, 'M', 'ꝅ'), - (0xA745, 'V'), - (0xA746, 'M', 'ꝇ'), - (0xA747, 'V'), - (0xA748, 'M', 'ꝉ'), - (0xA749, 'V'), - (0xA74A, 'M', 'ꝋ'), - (0xA74B, 'V'), - (0xA74C, 'M', 'ꝍ'), - (0xA74D, 'V'), - (0xA74E, 'M', 'ꝏ'), - (0xA74F, 'V'), - (0xA750, 'M', 'ꝑ'), - (0xA751, 'V'), - (0xA752, 'M', 'ꝓ'), - (0xA753, 'V'), - (0xA754, 'M', 'ꝕ'), - (0xA755, 'V'), - (0xA756, 'M', 'ꝗ'), - (0xA757, 'V'), - (0xA758, 'M', 'ꝙ'), - (0xA759, 'V'), - (0xA75A, 'M', 'ꝛ'), - (0xA75B, 'V'), - (0xA75C, 'M', 'ꝝ'), - (0xA75D, 'V'), - (0xA75E, 'M', 'ꝟ'), - (0xA75F, 'V'), - (0xA760, 'M', 'ꝡ'), - (0xA761, 'V'), - (0xA762, 'M', 'ꝣ'), - (0xA763, 'V'), - (0xA764, 'M', 'ꝥ'), - (0xA765, 'V'), - (0xA766, 'M', 'ꝧ'), - (0xA767, 'V'), - (0xA768, 'M', 'ꝩ'), - (0xA769, 'V'), - (0xA76A, 'M', 'ꝫ'), - (0xA76B, 'V'), - (0xA76C, 'M', 'ꝭ'), - (0xA76D, 'V'), - (0xA76E, 'M', 'ꝯ'), - (0xA76F, 'V'), - (0xA770, 'M', 'ꝯ'), - (0xA771, 'V'), - (0xA779, 'M', 'ꝺ'), - (0xA77A, 'V'), - (0xA77B, 'M', 'ꝼ'), - ] - -def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA77C, 'V'), - (0xA77D, 'M', 'ᵹ'), - (0xA77E, 'M', 'ꝿ'), - (0xA77F, 'V'), - (0xA780, 'M', 'ꞁ'), - (0xA781, 'V'), - (0xA782, 'M', 'ꞃ'), - (0xA783, 'V'), - (0xA784, 'M', 'ꞅ'), - (0xA785, 'V'), - (0xA786, 'M', 'ꞇ'), - (0xA787, 'V'), - (0xA78B, 
'M', 'ꞌ'), - (0xA78C, 'V'), - (0xA78D, 'M', 'ɥ'), - (0xA78E, 'V'), - (0xA790, 'M', 'ꞑ'), - (0xA791, 'V'), - (0xA792, 'M', 'ꞓ'), - (0xA793, 'V'), - (0xA796, 'M', 'ꞗ'), - (0xA797, 'V'), - (0xA798, 'M', 'ꞙ'), - (0xA799, 'V'), - (0xA79A, 'M', 'ꞛ'), - (0xA79B, 'V'), - (0xA79C, 'M', 'ꞝ'), - (0xA79D, 'V'), - (0xA79E, 'M', 'ꞟ'), - (0xA79F, 'V'), - (0xA7A0, 'M', 'ꞡ'), - (0xA7A1, 'V'), - (0xA7A2, 'M', 'ꞣ'), - (0xA7A3, 'V'), - (0xA7A4, 'M', 'ꞥ'), - (0xA7A5, 'V'), - (0xA7A6, 'M', 'ꞧ'), - (0xA7A7, 'V'), - (0xA7A8, 'M', 'ꞩ'), - (0xA7A9, 'V'), - (0xA7AA, 'M', 'ɦ'), - (0xA7AB, 'M', 'ɜ'), - (0xA7AC, 'M', 'ɡ'), - (0xA7AD, 'M', 'ɬ'), - (0xA7AE, 'M', 'ɪ'), - (0xA7AF, 'V'), - (0xA7B0, 'M', 'ʞ'), - (0xA7B1, 'M', 'ʇ'), - (0xA7B2, 'M', 'ʝ'), - (0xA7B3, 'M', 'ꭓ'), - (0xA7B4, 'M', 'ꞵ'), - (0xA7B5, 'V'), - (0xA7B6, 'M', 'ꞷ'), - (0xA7B7, 'V'), - (0xA7B8, 'M', 'ꞹ'), - (0xA7B9, 'V'), - (0xA7BA, 'M', 'ꞻ'), - (0xA7BB, 'V'), - (0xA7BC, 'M', 'ꞽ'), - (0xA7BD, 'V'), - (0xA7BE, 'M', 'ꞿ'), - (0xA7BF, 'V'), - (0xA7C0, 'M', 'ꟁ'), - (0xA7C1, 'V'), - (0xA7C2, 'M', 'ꟃ'), - (0xA7C3, 'V'), - (0xA7C4, 'M', 'ꞔ'), - (0xA7C5, 'M', 'ʂ'), - (0xA7C6, 'M', 'ᶎ'), - (0xA7C7, 'M', 'ꟈ'), - (0xA7C8, 'V'), - (0xA7C9, 'M', 'ꟊ'), - (0xA7CA, 'V'), - (0xA7CB, 'X'), - (0xA7D0, 'M', 'ꟑ'), - (0xA7D1, 'V'), - (0xA7D2, 'X'), - (0xA7D3, 'V'), - (0xA7D4, 'X'), - (0xA7D5, 'V'), - (0xA7D6, 'M', 'ꟗ'), - (0xA7D7, 'V'), - (0xA7D8, 'M', 'ꟙ'), - (0xA7D9, 'V'), - (0xA7DA, 'X'), - (0xA7F2, 'M', 'c'), - (0xA7F3, 'M', 'f'), - (0xA7F4, 'M', 'q'), - (0xA7F5, 'M', 'ꟶ'), - (0xA7F6, 'V'), - (0xA7F8, 'M', 'ħ'), - (0xA7F9, 'M', 'œ'), - (0xA7FA, 'V'), - (0xA82D, 'X'), - (0xA830, 'V'), - (0xA83A, 'X'), - (0xA840, 'V'), - (0xA878, 'X'), - (0xA880, 'V'), - (0xA8C6, 'X'), - ] - -def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA8CE, 'V'), - (0xA8DA, 'X'), - (0xA8E0, 'V'), - (0xA954, 'X'), - (0xA95F, 'V'), - (0xA97D, 'X'), - (0xA980, 'V'), - (0xA9CE, 'X'), - (0xA9CF, 'V'), - (0xA9DA, 'X'), - (0xA9DE, 'V'), - (0xA9FF, 'X'), - (0xAA00, 'V'), - (0xAA37, 'X'), - (0xAA40, 'V'), - (0xAA4E, 'X'), - (0xAA50, 'V'), - (0xAA5A, 'X'), - (0xAA5C, 'V'), - (0xAAC3, 'X'), - (0xAADB, 'V'), - (0xAAF7, 'X'), - (0xAB01, 'V'), - (0xAB07, 'X'), - (0xAB09, 'V'), - (0xAB0F, 'X'), - (0xAB11, 'V'), - (0xAB17, 'X'), - (0xAB20, 'V'), - (0xAB27, 'X'), - (0xAB28, 'V'), - (0xAB2F, 'X'), - (0xAB30, 'V'), - (0xAB5C, 'M', 'ꜧ'), - (0xAB5D, 'M', 'ꬷ'), - (0xAB5E, 'M', 'ɫ'), - (0xAB5F, 'M', 'ꭒ'), - (0xAB60, 'V'), - (0xAB69, 'M', 'ʍ'), - (0xAB6A, 'V'), - (0xAB6C, 'X'), - (0xAB70, 'M', 'Ꭰ'), - (0xAB71, 'M', 'Ꭱ'), - (0xAB72, 'M', 'Ꭲ'), - (0xAB73, 'M', 'Ꭳ'), - (0xAB74, 'M', 'Ꭴ'), - (0xAB75, 'M', 'Ꭵ'), - (0xAB76, 'M', 'Ꭶ'), - (0xAB77, 'M', 'Ꭷ'), - (0xAB78, 'M', 'Ꭸ'), - (0xAB79, 'M', 'Ꭹ'), - (0xAB7A, 'M', 'Ꭺ'), - (0xAB7B, 'M', 'Ꭻ'), - (0xAB7C, 'M', 'Ꭼ'), - (0xAB7D, 'M', 'Ꭽ'), - (0xAB7E, 'M', 'Ꭾ'), - (0xAB7F, 'M', 'Ꭿ'), - (0xAB80, 'M', 'Ꮀ'), - (0xAB81, 'M', 'Ꮁ'), - (0xAB82, 'M', 'Ꮂ'), - (0xAB83, 'M', 'Ꮃ'), - (0xAB84, 'M', 'Ꮄ'), - (0xAB85, 'M', 'Ꮅ'), - (0xAB86, 'M', 'Ꮆ'), - (0xAB87, 'M', 'Ꮇ'), - (0xAB88, 'M', 'Ꮈ'), - (0xAB89, 'M', 'Ꮉ'), - (0xAB8A, 'M', 'Ꮊ'), - (0xAB8B, 'M', 'Ꮋ'), - (0xAB8C, 'M', 'Ꮌ'), - (0xAB8D, 'M', 'Ꮍ'), - (0xAB8E, 'M', 'Ꮎ'), - (0xAB8F, 'M', 'Ꮏ'), - (0xAB90, 'M', 'Ꮐ'), - (0xAB91, 'M', 'Ꮑ'), - (0xAB92, 'M', 'Ꮒ'), - (0xAB93, 'M', 'Ꮓ'), - (0xAB94, 'M', 'Ꮔ'), - (0xAB95, 'M', 'Ꮕ'), - (0xAB96, 'M', 'Ꮖ'), - (0xAB97, 'M', 'Ꮗ'), - (0xAB98, 'M', 'Ꮘ'), - (0xAB99, 'M', 'Ꮙ'), - (0xAB9A, 'M', 'Ꮚ'), - (0xAB9B, 'M', 'Ꮛ'), - (0xAB9C, 'M', 'Ꮜ'), - (0xAB9D, 'M', 'Ꮝ'), - (0xAB9E, 'M', 'Ꮞ'), - 
(0xAB9F, 'M', 'Ꮟ'), - (0xABA0, 'M', 'Ꮠ'), - (0xABA1, 'M', 'Ꮡ'), - (0xABA2, 'M', 'Ꮢ'), - (0xABA3, 'M', 'Ꮣ'), - (0xABA4, 'M', 'Ꮤ'), - (0xABA5, 'M', 'Ꮥ'), - (0xABA6, 'M', 'Ꮦ'), - (0xABA7, 'M', 'Ꮧ'), - (0xABA8, 'M', 'Ꮨ'), - (0xABA9, 'M', 'Ꮩ'), - (0xABAA, 'M', 'Ꮪ'), - ] - -def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xABAB, 'M', 'Ꮫ'), - (0xABAC, 'M', 'Ꮬ'), - (0xABAD, 'M', 'Ꮭ'), - (0xABAE, 'M', 'Ꮮ'), - (0xABAF, 'M', 'Ꮯ'), - (0xABB0, 'M', 'Ꮰ'), - (0xABB1, 'M', 'Ꮱ'), - (0xABB2, 'M', 'Ꮲ'), - (0xABB3, 'M', 'Ꮳ'), - (0xABB4, 'M', 'Ꮴ'), - (0xABB5, 'M', 'Ꮵ'), - (0xABB6, 'M', 'Ꮶ'), - (0xABB7, 'M', 'Ꮷ'), - (0xABB8, 'M', 'Ꮸ'), - (0xABB9, 'M', 'Ꮹ'), - (0xABBA, 'M', 'Ꮺ'), - (0xABBB, 'M', 'Ꮻ'), - (0xABBC, 'M', 'Ꮼ'), - (0xABBD, 'M', 'Ꮽ'), - (0xABBE, 'M', 'Ꮾ'), - (0xABBF, 'M', 'Ꮿ'), - (0xABC0, 'V'), - (0xABEE, 'X'), - (0xABF0, 'V'), - (0xABFA, 'X'), - (0xAC00, 'V'), - (0xD7A4, 'X'), - (0xD7B0, 'V'), - (0xD7C7, 'X'), - (0xD7CB, 'V'), - (0xD7FC, 'X'), - (0xF900, 'M', '豈'), - (0xF901, 'M', '更'), - (0xF902, 'M', '車'), - (0xF903, 'M', '賈'), - (0xF904, 'M', '滑'), - (0xF905, 'M', '串'), - (0xF906, 'M', '句'), - (0xF907, 'M', '龜'), - (0xF909, 'M', '契'), - (0xF90A, 'M', '金'), - (0xF90B, 'M', '喇'), - (0xF90C, 'M', '奈'), - (0xF90D, 'M', '懶'), - (0xF90E, 'M', '癩'), - (0xF90F, 'M', '羅'), - (0xF910, 'M', '蘿'), - (0xF911, 'M', '螺'), - (0xF912, 'M', '裸'), - (0xF913, 'M', '邏'), - (0xF914, 'M', '樂'), - (0xF915, 'M', '洛'), - (0xF916, 'M', '烙'), - (0xF917, 'M', '珞'), - (0xF918, 'M', '落'), - (0xF919, 'M', '酪'), - (0xF91A, 'M', '駱'), - (0xF91B, 'M', '亂'), - (0xF91C, 'M', '卵'), - (0xF91D, 'M', '欄'), - (0xF91E, 'M', '爛'), - (0xF91F, 'M', '蘭'), - (0xF920, 'M', '鸞'), - (0xF921, 'M', '嵐'), - (0xF922, 'M', '濫'), - (0xF923, 'M', '藍'), - (0xF924, 'M', '襤'), - (0xF925, 'M', '拉'), - (0xF926, 'M', '臘'), - (0xF927, 'M', '蠟'), - (0xF928, 'M', '廊'), - (0xF929, 'M', '朗'), - (0xF92A, 'M', '浪'), - (0xF92B, 'M', '狼'), - (0xF92C, 'M', '郎'), - (0xF92D, 'M', '來'), - (0xF92E, 'M', '冷'), - (0xF92F, 'M', '勞'), - (0xF930, 'M', '擄'), - (0xF931, 'M', '櫓'), - (0xF932, 'M', '爐'), - (0xF933, 'M', '盧'), - (0xF934, 'M', '老'), - (0xF935, 'M', '蘆'), - (0xF936, 'M', '虜'), - (0xF937, 'M', '路'), - (0xF938, 'M', '露'), - (0xF939, 'M', '魯'), - (0xF93A, 'M', '鷺'), - (0xF93B, 'M', '碌'), - (0xF93C, 'M', '祿'), - (0xF93D, 'M', '綠'), - (0xF93E, 'M', '菉'), - (0xF93F, 'M', '錄'), - (0xF940, 'M', '鹿'), - (0xF941, 'M', '論'), - (0xF942, 'M', '壟'), - (0xF943, 'M', '弄'), - (0xF944, 'M', '籠'), - (0xF945, 'M', '聾'), - ] - -def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF946, 'M', '牢'), - (0xF947, 'M', '磊'), - (0xF948, 'M', '賂'), - (0xF949, 'M', '雷'), - (0xF94A, 'M', '壘'), - (0xF94B, 'M', '屢'), - (0xF94C, 'M', '樓'), - (0xF94D, 'M', '淚'), - (0xF94E, 'M', '漏'), - (0xF94F, 'M', '累'), - (0xF950, 'M', '縷'), - (0xF951, 'M', '陋'), - (0xF952, 'M', '勒'), - (0xF953, 'M', '肋'), - (0xF954, 'M', '凜'), - (0xF955, 'M', '凌'), - (0xF956, 'M', '稜'), - (0xF957, 'M', '綾'), - (0xF958, 'M', '菱'), - (0xF959, 'M', '陵'), - (0xF95A, 'M', '讀'), - (0xF95B, 'M', '拏'), - (0xF95C, 'M', '樂'), - (0xF95D, 'M', '諾'), - (0xF95E, 'M', '丹'), - (0xF95F, 'M', '寧'), - (0xF960, 'M', '怒'), - (0xF961, 'M', '率'), - (0xF962, 'M', '異'), - (0xF963, 'M', '北'), - (0xF964, 'M', '磻'), - (0xF965, 'M', '便'), - (0xF966, 'M', '復'), - (0xF967, 'M', '不'), - (0xF968, 'M', '泌'), - (0xF969, 'M', '數'), - (0xF96A, 'M', '索'), - (0xF96B, 'M', '參'), - (0xF96C, 'M', '塞'), - (0xF96D, 'M', '省'), - (0xF96E, 'M', '葉'), - (0xF96F, 'M', '說'), - (0xF970, 'M', '殺'), - (0xF971, 'M', '辰'), 
- (0xF972, 'M', '沈'), - (0xF973, 'M', '拾'), - (0xF974, 'M', '若'), - (0xF975, 'M', '掠'), - (0xF976, 'M', '略'), - (0xF977, 'M', '亮'), - (0xF978, 'M', '兩'), - (0xF979, 'M', '凉'), - (0xF97A, 'M', '梁'), - (0xF97B, 'M', '糧'), - (0xF97C, 'M', '良'), - (0xF97D, 'M', '諒'), - (0xF97E, 'M', '量'), - (0xF97F, 'M', '勵'), - (0xF980, 'M', '呂'), - (0xF981, 'M', '女'), - (0xF982, 'M', '廬'), - (0xF983, 'M', '旅'), - (0xF984, 'M', '濾'), - (0xF985, 'M', '礪'), - (0xF986, 'M', '閭'), - (0xF987, 'M', '驪'), - (0xF988, 'M', '麗'), - (0xF989, 'M', '黎'), - (0xF98A, 'M', '力'), - (0xF98B, 'M', '曆'), - (0xF98C, 'M', '歷'), - (0xF98D, 'M', '轢'), - (0xF98E, 'M', '年'), - (0xF98F, 'M', '憐'), - (0xF990, 'M', '戀'), - (0xF991, 'M', '撚'), - (0xF992, 'M', '漣'), - (0xF993, 'M', '煉'), - (0xF994, 'M', '璉'), - (0xF995, 'M', '秊'), - (0xF996, 'M', '練'), - (0xF997, 'M', '聯'), - (0xF998, 'M', '輦'), - (0xF999, 'M', '蓮'), - (0xF99A, 'M', '連'), - (0xF99B, 'M', '鍊'), - (0xF99C, 'M', '列'), - (0xF99D, 'M', '劣'), - (0xF99E, 'M', '咽'), - (0xF99F, 'M', '烈'), - (0xF9A0, 'M', '裂'), - (0xF9A1, 'M', '說'), - (0xF9A2, 'M', '廉'), - (0xF9A3, 'M', '念'), - (0xF9A4, 'M', '捻'), - (0xF9A5, 'M', '殮'), - (0xF9A6, 'M', '簾'), - (0xF9A7, 'M', '獵'), - (0xF9A8, 'M', '令'), - (0xF9A9, 'M', '囹'), - ] - -def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF9AA, 'M', '寧'), - (0xF9AB, 'M', '嶺'), - (0xF9AC, 'M', '怜'), - (0xF9AD, 'M', '玲'), - (0xF9AE, 'M', '瑩'), - (0xF9AF, 'M', '羚'), - (0xF9B0, 'M', '聆'), - (0xF9B1, 'M', '鈴'), - (0xF9B2, 'M', '零'), - (0xF9B3, 'M', '靈'), - (0xF9B4, 'M', '領'), - (0xF9B5, 'M', '例'), - (0xF9B6, 'M', '禮'), - (0xF9B7, 'M', '醴'), - (0xF9B8, 'M', '隸'), - (0xF9B9, 'M', '惡'), - (0xF9BA, 'M', '了'), - (0xF9BB, 'M', '僚'), - (0xF9BC, 'M', '寮'), - (0xF9BD, 'M', '尿'), - (0xF9BE, 'M', '料'), - (0xF9BF, 'M', '樂'), - (0xF9C0, 'M', '燎'), - (0xF9C1, 'M', '療'), - (0xF9C2, 'M', '蓼'), - (0xF9C3, 'M', '遼'), - (0xF9C4, 'M', '龍'), - (0xF9C5, 'M', '暈'), - (0xF9C6, 'M', '阮'), - (0xF9C7, 'M', '劉'), - (0xF9C8, 'M', '杻'), - (0xF9C9, 'M', '柳'), - (0xF9CA, 'M', '流'), - (0xF9CB, 'M', '溜'), - (0xF9CC, 'M', '琉'), - (0xF9CD, 'M', '留'), - (0xF9CE, 'M', '硫'), - (0xF9CF, 'M', '紐'), - (0xF9D0, 'M', '類'), - (0xF9D1, 'M', '六'), - (0xF9D2, 'M', '戮'), - (0xF9D3, 'M', '陸'), - (0xF9D4, 'M', '倫'), - (0xF9D5, 'M', '崙'), - (0xF9D6, 'M', '淪'), - (0xF9D7, 'M', '輪'), - (0xF9D8, 'M', '律'), - (0xF9D9, 'M', '慄'), - (0xF9DA, 'M', '栗'), - (0xF9DB, 'M', '率'), - (0xF9DC, 'M', '隆'), - (0xF9DD, 'M', '利'), - (0xF9DE, 'M', '吏'), - (0xF9DF, 'M', '履'), - (0xF9E0, 'M', '易'), - (0xF9E1, 'M', '李'), - (0xF9E2, 'M', '梨'), - (0xF9E3, 'M', '泥'), - (0xF9E4, 'M', '理'), - (0xF9E5, 'M', '痢'), - (0xF9E6, 'M', '罹'), - (0xF9E7, 'M', '裏'), - (0xF9E8, 'M', '裡'), - (0xF9E9, 'M', '里'), - (0xF9EA, 'M', '離'), - (0xF9EB, 'M', '匿'), - (0xF9EC, 'M', '溺'), - (0xF9ED, 'M', '吝'), - (0xF9EE, 'M', '燐'), - (0xF9EF, 'M', '璘'), - (0xF9F0, 'M', '藺'), - (0xF9F1, 'M', '隣'), - (0xF9F2, 'M', '鱗'), - (0xF9F3, 'M', '麟'), - (0xF9F4, 'M', '林'), - (0xF9F5, 'M', '淋'), - (0xF9F6, 'M', '臨'), - (0xF9F7, 'M', '立'), - (0xF9F8, 'M', '笠'), - (0xF9F9, 'M', '粒'), - (0xF9FA, 'M', '狀'), - (0xF9FB, 'M', '炙'), - (0xF9FC, 'M', '識'), - (0xF9FD, 'M', '什'), - (0xF9FE, 'M', '茶'), - (0xF9FF, 'M', '刺'), - (0xFA00, 'M', '切'), - (0xFA01, 'M', '度'), - (0xFA02, 'M', '拓'), - (0xFA03, 'M', '糖'), - (0xFA04, 'M', '宅'), - (0xFA05, 'M', '洞'), - (0xFA06, 'M', '暴'), - (0xFA07, 'M', '輻'), - (0xFA08, 'M', '行'), - (0xFA09, 'M', '降'), - (0xFA0A, 'M', '見'), - (0xFA0B, 'M', '廓'), - (0xFA0C, 'M', '兀'), - (0xFA0D, 'M', '嗀'), - ] - -def _seg_42() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA0E, 'V'), - (0xFA10, 'M', '塚'), - (0xFA11, 'V'), - (0xFA12, 'M', '晴'), - (0xFA13, 'V'), - (0xFA15, 'M', '凞'), - (0xFA16, 'M', '猪'), - (0xFA17, 'M', '益'), - (0xFA18, 'M', '礼'), - (0xFA19, 'M', '神'), - (0xFA1A, 'M', '祥'), - (0xFA1B, 'M', '福'), - (0xFA1C, 'M', '靖'), - (0xFA1D, 'M', '精'), - (0xFA1E, 'M', '羽'), - (0xFA1F, 'V'), - (0xFA20, 'M', '蘒'), - (0xFA21, 'V'), - (0xFA22, 'M', '諸'), - (0xFA23, 'V'), - (0xFA25, 'M', '逸'), - (0xFA26, 'M', '都'), - (0xFA27, 'V'), - (0xFA2A, 'M', '飯'), - (0xFA2B, 'M', '飼'), - (0xFA2C, 'M', '館'), - (0xFA2D, 'M', '鶴'), - (0xFA2E, 'M', '郞'), - (0xFA2F, 'M', '隷'), - (0xFA30, 'M', '侮'), - (0xFA31, 'M', '僧'), - (0xFA32, 'M', '免'), - (0xFA33, 'M', '勉'), - (0xFA34, 'M', '勤'), - (0xFA35, 'M', '卑'), - (0xFA36, 'M', '喝'), - (0xFA37, 'M', '嘆'), - (0xFA38, 'M', '器'), - (0xFA39, 'M', '塀'), - (0xFA3A, 'M', '墨'), - (0xFA3B, 'M', '層'), - (0xFA3C, 'M', '屮'), - (0xFA3D, 'M', '悔'), - (0xFA3E, 'M', '慨'), - (0xFA3F, 'M', '憎'), - (0xFA40, 'M', '懲'), - (0xFA41, 'M', '敏'), - (0xFA42, 'M', '既'), - (0xFA43, 'M', '暑'), - (0xFA44, 'M', '梅'), - (0xFA45, 'M', '海'), - (0xFA46, 'M', '渚'), - (0xFA47, 'M', '漢'), - (0xFA48, 'M', '煮'), - (0xFA49, 'M', '爫'), - (0xFA4A, 'M', '琢'), - (0xFA4B, 'M', '碑'), - (0xFA4C, 'M', '社'), - (0xFA4D, 'M', '祉'), - (0xFA4E, 'M', '祈'), - (0xFA4F, 'M', '祐'), - (0xFA50, 'M', '祖'), - (0xFA51, 'M', '祝'), - (0xFA52, 'M', '禍'), - (0xFA53, 'M', '禎'), - (0xFA54, 'M', '穀'), - (0xFA55, 'M', '突'), - (0xFA56, 'M', '節'), - (0xFA57, 'M', '練'), - (0xFA58, 'M', '縉'), - (0xFA59, 'M', '繁'), - (0xFA5A, 'M', '署'), - (0xFA5B, 'M', '者'), - (0xFA5C, 'M', '臭'), - (0xFA5D, 'M', '艹'), - (0xFA5F, 'M', '著'), - (0xFA60, 'M', '褐'), - (0xFA61, 'M', '視'), - (0xFA62, 'M', '謁'), - (0xFA63, 'M', '謹'), - (0xFA64, 'M', '賓'), - (0xFA65, 'M', '贈'), - (0xFA66, 'M', '辶'), - (0xFA67, 'M', '逸'), - (0xFA68, 'M', '難'), - (0xFA69, 'M', '響'), - (0xFA6A, 'M', '頻'), - (0xFA6B, 'M', '恵'), - (0xFA6C, 'M', '𤋮'), - (0xFA6D, 'M', '舘'), - (0xFA6E, 'X'), - (0xFA70, 'M', '並'), - (0xFA71, 'M', '况'), - (0xFA72, 'M', '全'), - (0xFA73, 'M', '侀'), - (0xFA74, 'M', '充'), - (0xFA75, 'M', '冀'), - (0xFA76, 'M', '勇'), - (0xFA77, 'M', '勺'), - (0xFA78, 'M', '喝'), - ] - -def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA79, 'M', '啕'), - (0xFA7A, 'M', '喙'), - (0xFA7B, 'M', '嗢'), - (0xFA7C, 'M', '塚'), - (0xFA7D, 'M', '墳'), - (0xFA7E, 'M', '奄'), - (0xFA7F, 'M', '奔'), - (0xFA80, 'M', '婢'), - (0xFA81, 'M', '嬨'), - (0xFA82, 'M', '廒'), - (0xFA83, 'M', '廙'), - (0xFA84, 'M', '彩'), - (0xFA85, 'M', '徭'), - (0xFA86, 'M', '惘'), - (0xFA87, 'M', '慎'), - (0xFA88, 'M', '愈'), - (0xFA89, 'M', '憎'), - (0xFA8A, 'M', '慠'), - (0xFA8B, 'M', '懲'), - (0xFA8C, 'M', '戴'), - (0xFA8D, 'M', '揄'), - (0xFA8E, 'M', '搜'), - (0xFA8F, 'M', '摒'), - (0xFA90, 'M', '敖'), - (0xFA91, 'M', '晴'), - (0xFA92, 'M', '朗'), - (0xFA93, 'M', '望'), - (0xFA94, 'M', '杖'), - (0xFA95, 'M', '歹'), - (0xFA96, 'M', '殺'), - (0xFA97, 'M', '流'), - (0xFA98, 'M', '滛'), - (0xFA99, 'M', '滋'), - (0xFA9A, 'M', '漢'), - (0xFA9B, 'M', '瀞'), - (0xFA9C, 'M', '煮'), - (0xFA9D, 'M', '瞧'), - (0xFA9E, 'M', '爵'), - (0xFA9F, 'M', '犯'), - (0xFAA0, 'M', '猪'), - (0xFAA1, 'M', '瑱'), - (0xFAA2, 'M', '甆'), - (0xFAA3, 'M', '画'), - (0xFAA4, 'M', '瘝'), - (0xFAA5, 'M', '瘟'), - (0xFAA6, 'M', '益'), - (0xFAA7, 'M', '盛'), - (0xFAA8, 'M', '直'), - (0xFAA9, 'M', '睊'), - (0xFAAA, 'M', '着'), - (0xFAAB, 'M', '磌'), - (0xFAAC, 'M', '窱'), - (0xFAAD, 'M', '節'), - (0xFAAE, 'M', '类'), - (0xFAAF, 'M', '絛'), - (0xFAB0, 'M', '練'), - (0xFAB1, 
'M', '缾'), - (0xFAB2, 'M', '者'), - (0xFAB3, 'M', '荒'), - (0xFAB4, 'M', '華'), - (0xFAB5, 'M', '蝹'), - (0xFAB6, 'M', '襁'), - (0xFAB7, 'M', '覆'), - (0xFAB8, 'M', '視'), - (0xFAB9, 'M', '調'), - (0xFABA, 'M', '諸'), - (0xFABB, 'M', '請'), - (0xFABC, 'M', '謁'), - (0xFABD, 'M', '諾'), - (0xFABE, 'M', '諭'), - (0xFABF, 'M', '謹'), - (0xFAC0, 'M', '變'), - (0xFAC1, 'M', '贈'), - (0xFAC2, 'M', '輸'), - (0xFAC3, 'M', '遲'), - (0xFAC4, 'M', '醙'), - (0xFAC5, 'M', '鉶'), - (0xFAC6, 'M', '陼'), - (0xFAC7, 'M', '難'), - (0xFAC8, 'M', '靖'), - (0xFAC9, 'M', '韛'), - (0xFACA, 'M', '響'), - (0xFACB, 'M', '頋'), - (0xFACC, 'M', '頻'), - (0xFACD, 'M', '鬒'), - (0xFACE, 'M', '龜'), - (0xFACF, 'M', '𢡊'), - (0xFAD0, 'M', '𢡄'), - (0xFAD1, 'M', '𣏕'), - (0xFAD2, 'M', '㮝'), - (0xFAD3, 'M', '䀘'), - (0xFAD4, 'M', '䀹'), - (0xFAD5, 'M', '𥉉'), - (0xFAD6, 'M', '𥳐'), - (0xFAD7, 'M', '𧻓'), - (0xFAD8, 'M', '齃'), - (0xFAD9, 'M', '龎'), - (0xFADA, 'X'), - (0xFB00, 'M', 'ff'), - (0xFB01, 'M', 'fi'), - ] - -def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFB02, 'M', 'fl'), - (0xFB03, 'M', 'ffi'), - (0xFB04, 'M', 'ffl'), - (0xFB05, 'M', 'st'), - (0xFB07, 'X'), - (0xFB13, 'M', 'մն'), - (0xFB14, 'M', 'մե'), - (0xFB15, 'M', 'մի'), - (0xFB16, 'M', 'վն'), - (0xFB17, 'M', 'մխ'), - (0xFB18, 'X'), - (0xFB1D, 'M', 'יִ'), - (0xFB1E, 'V'), - (0xFB1F, 'M', 'ײַ'), - (0xFB20, 'M', 'ע'), - (0xFB21, 'M', 'א'), - (0xFB22, 'M', 'ד'), - (0xFB23, 'M', 'ה'), - (0xFB24, 'M', 'כ'), - (0xFB25, 'M', 'ל'), - (0xFB26, 'M', 'ם'), - (0xFB27, 'M', 'ר'), - (0xFB28, 'M', 'ת'), - (0xFB29, '3', '+'), - (0xFB2A, 'M', 'שׁ'), - (0xFB2B, 'M', 'שׂ'), - (0xFB2C, 'M', 'שּׁ'), - (0xFB2D, 'M', 'שּׂ'), - (0xFB2E, 'M', 'אַ'), - (0xFB2F, 'M', 'אָ'), - (0xFB30, 'M', 'אּ'), - (0xFB31, 'M', 'בּ'), - (0xFB32, 'M', 'גּ'), - (0xFB33, 'M', 'דּ'), - (0xFB34, 'M', 'הּ'), - (0xFB35, 'M', 'וּ'), - (0xFB36, 'M', 'זּ'), - (0xFB37, 'X'), - (0xFB38, 'M', 'טּ'), - (0xFB39, 'M', 'יּ'), - (0xFB3A, 'M', 'ךּ'), - (0xFB3B, 'M', 'כּ'), - (0xFB3C, 'M', 'לּ'), - (0xFB3D, 'X'), - (0xFB3E, 'M', 'מּ'), - (0xFB3F, 'X'), - (0xFB40, 'M', 'נּ'), - (0xFB41, 'M', 'סּ'), - (0xFB42, 'X'), - (0xFB43, 'M', 'ףּ'), - (0xFB44, 'M', 'פּ'), - (0xFB45, 'X'), - (0xFB46, 'M', 'צּ'), - (0xFB47, 'M', 'קּ'), - (0xFB48, 'M', 'רּ'), - (0xFB49, 'M', 'שּ'), - (0xFB4A, 'M', 'תּ'), - (0xFB4B, 'M', 'וֹ'), - (0xFB4C, 'M', 'בֿ'), - (0xFB4D, 'M', 'כֿ'), - (0xFB4E, 'M', 'פֿ'), - (0xFB4F, 'M', 'אל'), - (0xFB50, 'M', 'ٱ'), - (0xFB52, 'M', 'ٻ'), - (0xFB56, 'M', 'پ'), - (0xFB5A, 'M', 'ڀ'), - (0xFB5E, 'M', 'ٺ'), - (0xFB62, 'M', 'ٿ'), - (0xFB66, 'M', 'ٹ'), - (0xFB6A, 'M', 'ڤ'), - (0xFB6E, 'M', 'ڦ'), - (0xFB72, 'M', 'ڄ'), - (0xFB76, 'M', 'ڃ'), - (0xFB7A, 'M', 'چ'), - (0xFB7E, 'M', 'ڇ'), - (0xFB82, 'M', 'ڍ'), - (0xFB84, 'M', 'ڌ'), - (0xFB86, 'M', 'ڎ'), - (0xFB88, 'M', 'ڈ'), - (0xFB8A, 'M', 'ژ'), - (0xFB8C, 'M', 'ڑ'), - (0xFB8E, 'M', 'ک'), - (0xFB92, 'M', 'گ'), - (0xFB96, 'M', 'ڳ'), - (0xFB9A, 'M', 'ڱ'), - (0xFB9E, 'M', 'ں'), - (0xFBA0, 'M', 'ڻ'), - (0xFBA4, 'M', 'ۀ'), - (0xFBA6, 'M', 'ہ'), - (0xFBAA, 'M', 'ھ'), - (0xFBAE, 'M', 'ے'), - (0xFBB0, 'M', 'ۓ'), - (0xFBB2, 'V'), - (0xFBC3, 'X'), - (0xFBD3, 'M', 'ڭ'), - (0xFBD7, 'M', 'ۇ'), - (0xFBD9, 'M', 'ۆ'), - (0xFBDB, 'M', 'ۈ'), - (0xFBDD, 'M', 'ۇٴ'), - (0xFBDE, 'M', 'ۋ'), - ] - -def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFBE0, 'M', 'ۅ'), - (0xFBE2, 'M', 'ۉ'), - (0xFBE4, 'M', 'ې'), - (0xFBE8, 'M', 'ى'), - (0xFBEA, 'M', 'ئا'), - (0xFBEC, 'M', 'ئە'), - (0xFBEE, 'M', 'ئو'), - (0xFBF0, 'M', 'ئۇ'), - (0xFBF2, 'M', 'ئۆ'), - (0xFBF4, 'M', 'ئۈ'), - 
(0xFBF6, 'M', 'ئې'), - (0xFBF9, 'M', 'ئى'), - (0xFBFC, 'M', 'ی'), - (0xFC00, 'M', 'ئج'), - (0xFC01, 'M', 'ئح'), - (0xFC02, 'M', 'ئم'), - (0xFC03, 'M', 'ئى'), - (0xFC04, 'M', 'ئي'), - (0xFC05, 'M', 'بج'), - (0xFC06, 'M', 'بح'), - (0xFC07, 'M', 'بخ'), - (0xFC08, 'M', 'بم'), - (0xFC09, 'M', 'بى'), - (0xFC0A, 'M', 'بي'), - (0xFC0B, 'M', 'تج'), - (0xFC0C, 'M', 'تح'), - (0xFC0D, 'M', 'تخ'), - (0xFC0E, 'M', 'تم'), - (0xFC0F, 'M', 'تى'), - (0xFC10, 'M', 'تي'), - (0xFC11, 'M', 'ثج'), - (0xFC12, 'M', 'ثم'), - (0xFC13, 'M', 'ثى'), - (0xFC14, 'M', 'ثي'), - (0xFC15, 'M', 'جح'), - (0xFC16, 'M', 'جم'), - (0xFC17, 'M', 'حج'), - (0xFC18, 'M', 'حم'), - (0xFC19, 'M', 'خج'), - (0xFC1A, 'M', 'خح'), - (0xFC1B, 'M', 'خم'), - (0xFC1C, 'M', 'سج'), - (0xFC1D, 'M', 'سح'), - (0xFC1E, 'M', 'سخ'), - (0xFC1F, 'M', 'سم'), - (0xFC20, 'M', 'صح'), - (0xFC21, 'M', 'صم'), - (0xFC22, 'M', 'ضج'), - (0xFC23, 'M', 'ضح'), - (0xFC24, 'M', 'ضخ'), - (0xFC25, 'M', 'ضم'), - (0xFC26, 'M', 'طح'), - (0xFC27, 'M', 'طم'), - (0xFC28, 'M', 'ظم'), - (0xFC29, 'M', 'عج'), - (0xFC2A, 'M', 'عم'), - (0xFC2B, 'M', 'غج'), - (0xFC2C, 'M', 'غم'), - (0xFC2D, 'M', 'فج'), - (0xFC2E, 'M', 'فح'), - (0xFC2F, 'M', 'فخ'), - (0xFC30, 'M', 'فم'), - (0xFC31, 'M', 'فى'), - (0xFC32, 'M', 'في'), - (0xFC33, 'M', 'قح'), - (0xFC34, 'M', 'قم'), - (0xFC35, 'M', 'قى'), - (0xFC36, 'M', 'قي'), - (0xFC37, 'M', 'كا'), - (0xFC38, 'M', 'كج'), - (0xFC39, 'M', 'كح'), - (0xFC3A, 'M', 'كخ'), - (0xFC3B, 'M', 'كل'), - (0xFC3C, 'M', 'كم'), - (0xFC3D, 'M', 'كى'), - (0xFC3E, 'M', 'كي'), - (0xFC3F, 'M', 'لج'), - (0xFC40, 'M', 'لح'), - (0xFC41, 'M', 'لخ'), - (0xFC42, 'M', 'لم'), - (0xFC43, 'M', 'لى'), - (0xFC44, 'M', 'لي'), - (0xFC45, 'M', 'مج'), - (0xFC46, 'M', 'مح'), - (0xFC47, 'M', 'مخ'), - (0xFC48, 'M', 'مم'), - (0xFC49, 'M', 'مى'), - (0xFC4A, 'M', 'مي'), - (0xFC4B, 'M', 'نج'), - (0xFC4C, 'M', 'نح'), - (0xFC4D, 'M', 'نخ'), - (0xFC4E, 'M', 'نم'), - (0xFC4F, 'M', 'نى'), - (0xFC50, 'M', 'ني'), - (0xFC51, 'M', 'هج'), - (0xFC52, 'M', 'هم'), - (0xFC53, 'M', 'هى'), - (0xFC54, 'M', 'هي'), - (0xFC55, 'M', 'يج'), - (0xFC56, 'M', 'يح'), - ] - -def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFC57, 'M', 'يخ'), - (0xFC58, 'M', 'يم'), - (0xFC59, 'M', 'يى'), - (0xFC5A, 'M', 'يي'), - (0xFC5B, 'M', 'ذٰ'), - (0xFC5C, 'M', 'رٰ'), - (0xFC5D, 'M', 'ىٰ'), - (0xFC5E, '3', ' ٌّ'), - (0xFC5F, '3', ' ٍّ'), - (0xFC60, '3', ' َّ'), - (0xFC61, '3', ' ُّ'), - (0xFC62, '3', ' ِّ'), - (0xFC63, '3', ' ّٰ'), - (0xFC64, 'M', 'ئر'), - (0xFC65, 'M', 'ئز'), - (0xFC66, 'M', 'ئم'), - (0xFC67, 'M', 'ئن'), - (0xFC68, 'M', 'ئى'), - (0xFC69, 'M', 'ئي'), - (0xFC6A, 'M', 'بر'), - (0xFC6B, 'M', 'بز'), - (0xFC6C, 'M', 'بم'), - (0xFC6D, 'M', 'بن'), - (0xFC6E, 'M', 'بى'), - (0xFC6F, 'M', 'بي'), - (0xFC70, 'M', 'تر'), - (0xFC71, 'M', 'تز'), - (0xFC72, 'M', 'تم'), - (0xFC73, 'M', 'تن'), - (0xFC74, 'M', 'تى'), - (0xFC75, 'M', 'تي'), - (0xFC76, 'M', 'ثر'), - (0xFC77, 'M', 'ثز'), - (0xFC78, 'M', 'ثم'), - (0xFC79, 'M', 'ثن'), - (0xFC7A, 'M', 'ثى'), - (0xFC7B, 'M', 'ثي'), - (0xFC7C, 'M', 'فى'), - (0xFC7D, 'M', 'في'), - (0xFC7E, 'M', 'قى'), - (0xFC7F, 'M', 'قي'), - (0xFC80, 'M', 'كا'), - (0xFC81, 'M', 'كل'), - (0xFC82, 'M', 'كم'), - (0xFC83, 'M', 'كى'), - (0xFC84, 'M', 'كي'), - (0xFC85, 'M', 'لم'), - (0xFC86, 'M', 'لى'), - (0xFC87, 'M', 'لي'), - (0xFC88, 'M', 'ما'), - (0xFC89, 'M', 'مم'), - (0xFC8A, 'M', 'نر'), - (0xFC8B, 'M', 'نز'), - (0xFC8C, 'M', 'نم'), - (0xFC8D, 'M', 'نن'), - (0xFC8E, 'M', 'نى'), - (0xFC8F, 'M', 'ني'), - (0xFC90, 'M', 'ىٰ'), - (0xFC91, 'M', 'ير'), - (0xFC92, 'M', 'يز'), - (0xFC93, 
'M', 'يم'), - (0xFC94, 'M', 'ين'), - (0xFC95, 'M', 'يى'), - (0xFC96, 'M', 'يي'), - (0xFC97, 'M', 'ئج'), - (0xFC98, 'M', 'ئح'), - (0xFC99, 'M', 'ئخ'), - (0xFC9A, 'M', 'ئم'), - (0xFC9B, 'M', 'ئه'), - (0xFC9C, 'M', 'بج'), - (0xFC9D, 'M', 'بح'), - (0xFC9E, 'M', 'بخ'), - (0xFC9F, 'M', 'بم'), - (0xFCA0, 'M', 'به'), - (0xFCA1, 'M', 'تج'), - (0xFCA2, 'M', 'تح'), - (0xFCA3, 'M', 'تخ'), - (0xFCA4, 'M', 'تم'), - (0xFCA5, 'M', 'ته'), - (0xFCA6, 'M', 'ثم'), - (0xFCA7, 'M', 'جح'), - (0xFCA8, 'M', 'جم'), - (0xFCA9, 'M', 'حج'), - (0xFCAA, 'M', 'حم'), - (0xFCAB, 'M', 'خج'), - (0xFCAC, 'M', 'خم'), - (0xFCAD, 'M', 'سج'), - (0xFCAE, 'M', 'سح'), - (0xFCAF, 'M', 'سخ'), - (0xFCB0, 'M', 'سم'), - (0xFCB1, 'M', 'صح'), - (0xFCB2, 'M', 'صخ'), - (0xFCB3, 'M', 'صم'), - (0xFCB4, 'M', 'ضج'), - (0xFCB5, 'M', 'ضح'), - (0xFCB6, 'M', 'ضخ'), - (0xFCB7, 'M', 'ضم'), - (0xFCB8, 'M', 'طح'), - (0xFCB9, 'M', 'ظم'), - (0xFCBA, 'M', 'عج'), - ] - -def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFCBB, 'M', 'عم'), - (0xFCBC, 'M', 'غج'), - (0xFCBD, 'M', 'غم'), - (0xFCBE, 'M', 'فج'), - (0xFCBF, 'M', 'فح'), - (0xFCC0, 'M', 'فخ'), - (0xFCC1, 'M', 'فم'), - (0xFCC2, 'M', 'قح'), - (0xFCC3, 'M', 'قم'), - (0xFCC4, 'M', 'كج'), - (0xFCC5, 'M', 'كح'), - (0xFCC6, 'M', 'كخ'), - (0xFCC7, 'M', 'كل'), - (0xFCC8, 'M', 'كم'), - (0xFCC9, 'M', 'لج'), - (0xFCCA, 'M', 'لح'), - (0xFCCB, 'M', 'لخ'), - (0xFCCC, 'M', 'لم'), - (0xFCCD, 'M', 'له'), - (0xFCCE, 'M', 'مج'), - (0xFCCF, 'M', 'مح'), - (0xFCD0, 'M', 'مخ'), - (0xFCD1, 'M', 'مم'), - (0xFCD2, 'M', 'نج'), - (0xFCD3, 'M', 'نح'), - (0xFCD4, 'M', 'نخ'), - (0xFCD5, 'M', 'نم'), - (0xFCD6, 'M', 'نه'), - (0xFCD7, 'M', 'هج'), - (0xFCD8, 'M', 'هم'), - (0xFCD9, 'M', 'هٰ'), - (0xFCDA, 'M', 'يج'), - (0xFCDB, 'M', 'يح'), - (0xFCDC, 'M', 'يخ'), - (0xFCDD, 'M', 'يم'), - (0xFCDE, 'M', 'يه'), - (0xFCDF, 'M', 'ئم'), - (0xFCE0, 'M', 'ئه'), - (0xFCE1, 'M', 'بم'), - (0xFCE2, 'M', 'به'), - (0xFCE3, 'M', 'تم'), - (0xFCE4, 'M', 'ته'), - (0xFCE5, 'M', 'ثم'), - (0xFCE6, 'M', 'ثه'), - (0xFCE7, 'M', 'سم'), - (0xFCE8, 'M', 'سه'), - (0xFCE9, 'M', 'شم'), - (0xFCEA, 'M', 'شه'), - (0xFCEB, 'M', 'كل'), - (0xFCEC, 'M', 'كم'), - (0xFCED, 'M', 'لم'), - (0xFCEE, 'M', 'نم'), - (0xFCEF, 'M', 'نه'), - (0xFCF0, 'M', 'يم'), - (0xFCF1, 'M', 'يه'), - (0xFCF2, 'M', 'ـَّ'), - (0xFCF3, 'M', 'ـُّ'), - (0xFCF4, 'M', 'ـِّ'), - (0xFCF5, 'M', 'طى'), - (0xFCF6, 'M', 'طي'), - (0xFCF7, 'M', 'عى'), - (0xFCF8, 'M', 'عي'), - (0xFCF9, 'M', 'غى'), - (0xFCFA, 'M', 'غي'), - (0xFCFB, 'M', 'سى'), - (0xFCFC, 'M', 'سي'), - (0xFCFD, 'M', 'شى'), - (0xFCFE, 'M', 'شي'), - (0xFCFF, 'M', 'حى'), - (0xFD00, 'M', 'حي'), - (0xFD01, 'M', 'جى'), - (0xFD02, 'M', 'جي'), - (0xFD03, 'M', 'خى'), - (0xFD04, 'M', 'خي'), - (0xFD05, 'M', 'صى'), - (0xFD06, 'M', 'صي'), - (0xFD07, 'M', 'ضى'), - (0xFD08, 'M', 'ضي'), - (0xFD09, 'M', 'شج'), - (0xFD0A, 'M', 'شح'), - (0xFD0B, 'M', 'شخ'), - (0xFD0C, 'M', 'شم'), - (0xFD0D, 'M', 'شر'), - (0xFD0E, 'M', 'سر'), - (0xFD0F, 'M', 'صر'), - (0xFD10, 'M', 'ضر'), - (0xFD11, 'M', 'طى'), - (0xFD12, 'M', 'طي'), - (0xFD13, 'M', 'عى'), - (0xFD14, 'M', 'عي'), - (0xFD15, 'M', 'غى'), - (0xFD16, 'M', 'غي'), - (0xFD17, 'M', 'سى'), - (0xFD18, 'M', 'سي'), - (0xFD19, 'M', 'شى'), - (0xFD1A, 'M', 'شي'), - (0xFD1B, 'M', 'حى'), - (0xFD1C, 'M', 'حي'), - (0xFD1D, 'M', 'جى'), - (0xFD1E, 'M', 'جي'), - ] - -def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFD1F, 'M', 'خى'), - (0xFD20, 'M', 'خي'), - (0xFD21, 'M', 'صى'), - (0xFD22, 'M', 'صي'), - (0xFD23, 'M', 'ضى'), - (0xFD24, 'M', 'ضي'), - (0xFD25, 'M', 'شج'), - 
(0xFD26, 'M', 'شح'), - (0xFD27, 'M', 'شخ'), - (0xFD28, 'M', 'شم'), - (0xFD29, 'M', 'شر'), - (0xFD2A, 'M', 'سر'), - (0xFD2B, 'M', 'صر'), - (0xFD2C, 'M', 'ضر'), - (0xFD2D, 'M', 'شج'), - (0xFD2E, 'M', 'شح'), - (0xFD2F, 'M', 'شخ'), - (0xFD30, 'M', 'شم'), - (0xFD31, 'M', 'سه'), - (0xFD32, 'M', 'شه'), - (0xFD33, 'M', 'طم'), - (0xFD34, 'M', 'سج'), - (0xFD35, 'M', 'سح'), - (0xFD36, 'M', 'سخ'), - (0xFD37, 'M', 'شج'), - (0xFD38, 'M', 'شح'), - (0xFD39, 'M', 'شخ'), - (0xFD3A, 'M', 'طم'), - (0xFD3B, 'M', 'ظم'), - (0xFD3C, 'M', 'اً'), - (0xFD3E, 'V'), - (0xFD50, 'M', 'تجم'), - (0xFD51, 'M', 'تحج'), - (0xFD53, 'M', 'تحم'), - (0xFD54, 'M', 'تخم'), - (0xFD55, 'M', 'تمج'), - (0xFD56, 'M', 'تمح'), - (0xFD57, 'M', 'تمخ'), - (0xFD58, 'M', 'جمح'), - (0xFD5A, 'M', 'حمي'), - (0xFD5B, 'M', 'حمى'), - (0xFD5C, 'M', 'سحج'), - (0xFD5D, 'M', 'سجح'), - (0xFD5E, 'M', 'سجى'), - (0xFD5F, 'M', 'سمح'), - (0xFD61, 'M', 'سمج'), - (0xFD62, 'M', 'سمم'), - (0xFD64, 'M', 'صحح'), - (0xFD66, 'M', 'صمم'), - (0xFD67, 'M', 'شحم'), - (0xFD69, 'M', 'شجي'), - (0xFD6A, 'M', 'شمخ'), - (0xFD6C, 'M', 'شمم'), - (0xFD6E, 'M', 'ضحى'), - (0xFD6F, 'M', 'ضخم'), - (0xFD71, 'M', 'طمح'), - (0xFD73, 'M', 'طمم'), - (0xFD74, 'M', 'طمي'), - (0xFD75, 'M', 'عجم'), - (0xFD76, 'M', 'عمم'), - (0xFD78, 'M', 'عمى'), - (0xFD79, 'M', 'غمم'), - (0xFD7A, 'M', 'غمي'), - (0xFD7B, 'M', 'غمى'), - (0xFD7C, 'M', 'فخم'), - (0xFD7E, 'M', 'قمح'), - (0xFD7F, 'M', 'قمم'), - (0xFD80, 'M', 'لحم'), - (0xFD81, 'M', 'لحي'), - (0xFD82, 'M', 'لحى'), - (0xFD83, 'M', 'لجج'), - (0xFD85, 'M', 'لخم'), - (0xFD87, 'M', 'لمح'), - (0xFD89, 'M', 'محج'), - (0xFD8A, 'M', 'محم'), - (0xFD8B, 'M', 'محي'), - (0xFD8C, 'M', 'مجح'), - (0xFD8D, 'M', 'مجم'), - (0xFD8E, 'M', 'مخج'), - (0xFD8F, 'M', 'مخم'), - (0xFD90, 'X'), - (0xFD92, 'M', 'مجخ'), - (0xFD93, 'M', 'همج'), - (0xFD94, 'M', 'همم'), - (0xFD95, 'M', 'نحم'), - (0xFD96, 'M', 'نحى'), - (0xFD97, 'M', 'نجم'), - (0xFD99, 'M', 'نجى'), - (0xFD9A, 'M', 'نمي'), - (0xFD9B, 'M', 'نمى'), - (0xFD9C, 'M', 'يمم'), - (0xFD9E, 'M', 'بخي'), - (0xFD9F, 'M', 'تجي'), - (0xFDA0, 'M', 'تجى'), - (0xFDA1, 'M', 'تخي'), - (0xFDA2, 'M', 'تخى'), - (0xFDA3, 'M', 'تمي'), - (0xFDA4, 'M', 'تمى'), - (0xFDA5, 'M', 'جمي'), - (0xFDA6, 'M', 'جحى'), - ] - -def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFDA7, 'M', 'جمى'), - (0xFDA8, 'M', 'سخى'), - (0xFDA9, 'M', 'صحي'), - (0xFDAA, 'M', 'شحي'), - (0xFDAB, 'M', 'ضحي'), - (0xFDAC, 'M', 'لجي'), - (0xFDAD, 'M', 'لمي'), - (0xFDAE, 'M', 'يحي'), - (0xFDAF, 'M', 'يجي'), - (0xFDB0, 'M', 'يمي'), - (0xFDB1, 'M', 'ممي'), - (0xFDB2, 'M', 'قمي'), - (0xFDB3, 'M', 'نحي'), - (0xFDB4, 'M', 'قمح'), - (0xFDB5, 'M', 'لحم'), - (0xFDB6, 'M', 'عمي'), - (0xFDB7, 'M', 'كمي'), - (0xFDB8, 'M', 'نجح'), - (0xFDB9, 'M', 'مخي'), - (0xFDBA, 'M', 'لجم'), - (0xFDBB, 'M', 'كمم'), - (0xFDBC, 'M', 'لجم'), - (0xFDBD, 'M', 'نجح'), - (0xFDBE, 'M', 'جحي'), - (0xFDBF, 'M', 'حجي'), - (0xFDC0, 'M', 'مجي'), - (0xFDC1, 'M', 'فمي'), - (0xFDC2, 'M', 'بحي'), - (0xFDC3, 'M', 'كمم'), - (0xFDC4, 'M', 'عجم'), - (0xFDC5, 'M', 'صمم'), - (0xFDC6, 'M', 'سخي'), - (0xFDC7, 'M', 'نجي'), - (0xFDC8, 'X'), - (0xFDCF, 'V'), - (0xFDD0, 'X'), - (0xFDF0, 'M', 'صلے'), - (0xFDF1, 'M', 'قلے'), - (0xFDF2, 'M', 'الله'), - (0xFDF3, 'M', 'اكبر'), - (0xFDF4, 'M', 'محمد'), - (0xFDF5, 'M', 'صلعم'), - (0xFDF6, 'M', 'رسول'), - (0xFDF7, 'M', 'عليه'), - (0xFDF8, 'M', 'وسلم'), - (0xFDF9, 'M', 'صلى'), - (0xFDFA, '3', 'صلى الله عليه وسلم'), - (0xFDFB, '3', 'جل جلاله'), - (0xFDFC, 'M', 'ریال'), - (0xFDFD, 'V'), - (0xFE00, 'I'), - (0xFE10, '3', ','), - (0xFE11, 'M', '、'), - (0xFE12, 
'X'), - (0xFE13, '3', ':'), - (0xFE14, '3', ';'), - (0xFE15, '3', '!'), - (0xFE16, '3', '?'), - (0xFE17, 'M', '〖'), - (0xFE18, 'M', '〗'), - (0xFE19, 'X'), - (0xFE20, 'V'), - (0xFE30, 'X'), - (0xFE31, 'M', '—'), - (0xFE32, 'M', '–'), - (0xFE33, '3', '_'), - (0xFE35, '3', '('), - (0xFE36, '3', ')'), - (0xFE37, '3', '{'), - (0xFE38, '3', '}'), - (0xFE39, 'M', '〔'), - (0xFE3A, 'M', '〕'), - (0xFE3B, 'M', '【'), - (0xFE3C, 'M', '】'), - (0xFE3D, 'M', '《'), - (0xFE3E, 'M', '》'), - (0xFE3F, 'M', '〈'), - (0xFE40, 'M', '〉'), - (0xFE41, 'M', '「'), - (0xFE42, 'M', '」'), - (0xFE43, 'M', '『'), - (0xFE44, 'M', '』'), - (0xFE45, 'V'), - (0xFE47, '3', '['), - (0xFE48, '3', ']'), - (0xFE49, '3', ' ̅'), - (0xFE4D, '3', '_'), - (0xFE50, '3', ','), - (0xFE51, 'M', '、'), - (0xFE52, 'X'), - (0xFE54, '3', ';'), - (0xFE55, '3', ':'), - (0xFE56, '3', '?'), - (0xFE57, '3', '!'), - (0xFE58, 'M', '—'), - (0xFE59, '3', '('), - (0xFE5A, '3', ')'), - (0xFE5B, '3', '{'), - (0xFE5C, '3', '}'), - (0xFE5D, 'M', '〔'), - ] - -def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFE5E, 'M', '〕'), - (0xFE5F, '3', '#'), - (0xFE60, '3', '&'), - (0xFE61, '3', '*'), - (0xFE62, '3', '+'), - (0xFE63, 'M', '-'), - (0xFE64, '3', '<'), - (0xFE65, '3', '>'), - (0xFE66, '3', '='), - (0xFE67, 'X'), - (0xFE68, '3', '\\'), - (0xFE69, '3', '$'), - (0xFE6A, '3', '%'), - (0xFE6B, '3', '@'), - (0xFE6C, 'X'), - (0xFE70, '3', ' ً'), - (0xFE71, 'M', 'ـً'), - (0xFE72, '3', ' ٌ'), - (0xFE73, 'V'), - (0xFE74, '3', ' ٍ'), - (0xFE75, 'X'), - (0xFE76, '3', ' َ'), - (0xFE77, 'M', 'ـَ'), - (0xFE78, '3', ' ُ'), - (0xFE79, 'M', 'ـُ'), - (0xFE7A, '3', ' ِ'), - (0xFE7B, 'M', 'ـِ'), - (0xFE7C, '3', ' ّ'), - (0xFE7D, 'M', 'ـّ'), - (0xFE7E, '3', ' ْ'), - (0xFE7F, 'M', 'ـْ'), - (0xFE80, 'M', 'ء'), - (0xFE81, 'M', 'آ'), - (0xFE83, 'M', 'أ'), - (0xFE85, 'M', 'ؤ'), - (0xFE87, 'M', 'إ'), - (0xFE89, 'M', 'ئ'), - (0xFE8D, 'M', 'ا'), - (0xFE8F, 'M', 'ب'), - (0xFE93, 'M', 'ة'), - (0xFE95, 'M', 'ت'), - (0xFE99, 'M', 'ث'), - (0xFE9D, 'M', 'ج'), - (0xFEA1, 'M', 'ح'), - (0xFEA5, 'M', 'خ'), - (0xFEA9, 'M', 'د'), - (0xFEAB, 'M', 'ذ'), - (0xFEAD, 'M', 'ر'), - (0xFEAF, 'M', 'ز'), - (0xFEB1, 'M', 'س'), - (0xFEB5, 'M', 'ش'), - (0xFEB9, 'M', 'ص'), - (0xFEBD, 'M', 'ض'), - (0xFEC1, 'M', 'ط'), - (0xFEC5, 'M', 'ظ'), - (0xFEC9, 'M', 'ع'), - (0xFECD, 'M', 'غ'), - (0xFED1, 'M', 'ف'), - (0xFED5, 'M', 'ق'), - (0xFED9, 'M', 'ك'), - (0xFEDD, 'M', 'ل'), - (0xFEE1, 'M', 'م'), - (0xFEE5, 'M', 'ن'), - (0xFEE9, 'M', 'ه'), - (0xFEED, 'M', 'و'), - (0xFEEF, 'M', 'ى'), - (0xFEF1, 'M', 'ي'), - (0xFEF5, 'M', 'لآ'), - (0xFEF7, 'M', 'لأ'), - (0xFEF9, 'M', 'لإ'), - (0xFEFB, 'M', 'لا'), - (0xFEFD, 'X'), - (0xFEFF, 'I'), - (0xFF00, 'X'), - (0xFF01, '3', '!'), - (0xFF02, '3', '"'), - (0xFF03, '3', '#'), - (0xFF04, '3', '$'), - (0xFF05, '3', '%'), - (0xFF06, '3', '&'), - (0xFF07, '3', '\''), - (0xFF08, '3', '('), - (0xFF09, '3', ')'), - (0xFF0A, '3', '*'), - (0xFF0B, '3', '+'), - (0xFF0C, '3', ','), - (0xFF0D, 'M', '-'), - (0xFF0E, 'M', '.'), - (0xFF0F, '3', '/'), - (0xFF10, 'M', '0'), - (0xFF11, 'M', '1'), - (0xFF12, 'M', '2'), - (0xFF13, 'M', '3'), - (0xFF14, 'M', '4'), - (0xFF15, 'M', '5'), - (0xFF16, 'M', '6'), - (0xFF17, 'M', '7'), - (0xFF18, 'M', '8'), - (0xFF19, 'M', '9'), - (0xFF1A, '3', ':'), - ] - -def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF1B, '3', ';'), - (0xFF1C, '3', '<'), - (0xFF1D, '3', '='), - (0xFF1E, '3', '>'), - (0xFF1F, '3', '?'), - (0xFF20, '3', '@'), - (0xFF21, 'M', 'a'), - (0xFF22, 'M', 'b'), - (0xFF23, 'M', 'c'), - 
(0xFF24, 'M', 'd'), - (0xFF25, 'M', 'e'), - (0xFF26, 'M', 'f'), - (0xFF27, 'M', 'g'), - (0xFF28, 'M', 'h'), - (0xFF29, 'M', 'i'), - (0xFF2A, 'M', 'j'), - (0xFF2B, 'M', 'k'), - (0xFF2C, 'M', 'l'), - (0xFF2D, 'M', 'm'), - (0xFF2E, 'M', 'n'), - (0xFF2F, 'M', 'o'), - (0xFF30, 'M', 'p'), - (0xFF31, 'M', 'q'), - (0xFF32, 'M', 'r'), - (0xFF33, 'M', 's'), - (0xFF34, 'M', 't'), - (0xFF35, 'M', 'u'), - (0xFF36, 'M', 'v'), - (0xFF37, 'M', 'w'), - (0xFF38, 'M', 'x'), - (0xFF39, 'M', 'y'), - (0xFF3A, 'M', 'z'), - (0xFF3B, '3', '['), - (0xFF3C, '3', '\\'), - (0xFF3D, '3', ']'), - (0xFF3E, '3', '^'), - (0xFF3F, '3', '_'), - (0xFF40, '3', '`'), - (0xFF41, 'M', 'a'), - (0xFF42, 'M', 'b'), - (0xFF43, 'M', 'c'), - (0xFF44, 'M', 'd'), - (0xFF45, 'M', 'e'), - (0xFF46, 'M', 'f'), - (0xFF47, 'M', 'g'), - (0xFF48, 'M', 'h'), - (0xFF49, 'M', 'i'), - (0xFF4A, 'M', 'j'), - (0xFF4B, 'M', 'k'), - (0xFF4C, 'M', 'l'), - (0xFF4D, 'M', 'm'), - (0xFF4E, 'M', 'n'), - (0xFF4F, 'M', 'o'), - (0xFF50, 'M', 'p'), - (0xFF51, 'M', 'q'), - (0xFF52, 'M', 'r'), - (0xFF53, 'M', 's'), - (0xFF54, 'M', 't'), - (0xFF55, 'M', 'u'), - (0xFF56, 'M', 'v'), - (0xFF57, 'M', 'w'), - (0xFF58, 'M', 'x'), - (0xFF59, 'M', 'y'), - (0xFF5A, 'M', 'z'), - (0xFF5B, '3', '{'), - (0xFF5C, '3', '|'), - (0xFF5D, '3', '}'), - (0xFF5E, '3', '~'), - (0xFF5F, 'M', '⦅'), - (0xFF60, 'M', '⦆'), - (0xFF61, 'M', '.'), - (0xFF62, 'M', '「'), - (0xFF63, 'M', '」'), - (0xFF64, 'M', '、'), - (0xFF65, 'M', '・'), - (0xFF66, 'M', 'ヲ'), - (0xFF67, 'M', 'ァ'), - (0xFF68, 'M', 'ィ'), - (0xFF69, 'M', 'ゥ'), - (0xFF6A, 'M', 'ェ'), - (0xFF6B, 'M', 'ォ'), - (0xFF6C, 'M', 'ャ'), - (0xFF6D, 'M', 'ュ'), - (0xFF6E, 'M', 'ョ'), - (0xFF6F, 'M', 'ッ'), - (0xFF70, 'M', 'ー'), - (0xFF71, 'M', 'ア'), - (0xFF72, 'M', 'イ'), - (0xFF73, 'M', 'ウ'), - (0xFF74, 'M', 'エ'), - (0xFF75, 'M', 'オ'), - (0xFF76, 'M', 'カ'), - (0xFF77, 'M', 'キ'), - (0xFF78, 'M', 'ク'), - (0xFF79, 'M', 'ケ'), - (0xFF7A, 'M', 'コ'), - (0xFF7B, 'M', 'サ'), - (0xFF7C, 'M', 'シ'), - (0xFF7D, 'M', 'ス'), - (0xFF7E, 'M', 'セ'), - ] - -def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF7F, 'M', 'ソ'), - (0xFF80, 'M', 'タ'), - (0xFF81, 'M', 'チ'), - (0xFF82, 'M', 'ツ'), - (0xFF83, 'M', 'テ'), - (0xFF84, 'M', 'ト'), - (0xFF85, 'M', 'ナ'), - (0xFF86, 'M', 'ニ'), - (0xFF87, 'M', 'ヌ'), - (0xFF88, 'M', 'ネ'), - (0xFF89, 'M', 'ノ'), - (0xFF8A, 'M', 'ハ'), - (0xFF8B, 'M', 'ヒ'), - (0xFF8C, 'M', 'フ'), - (0xFF8D, 'M', 'ヘ'), - (0xFF8E, 'M', 'ホ'), - (0xFF8F, 'M', 'マ'), - (0xFF90, 'M', 'ミ'), - (0xFF91, 'M', 'ム'), - (0xFF92, 'M', 'メ'), - (0xFF93, 'M', 'モ'), - (0xFF94, 'M', 'ヤ'), - (0xFF95, 'M', 'ユ'), - (0xFF96, 'M', 'ヨ'), - (0xFF97, 'M', 'ラ'), - (0xFF98, 'M', 'リ'), - (0xFF99, 'M', 'ル'), - (0xFF9A, 'M', 'レ'), - (0xFF9B, 'M', 'ロ'), - (0xFF9C, 'M', 'ワ'), - (0xFF9D, 'M', 'ン'), - (0xFF9E, 'M', '゙'), - (0xFF9F, 'M', '゚'), - (0xFFA0, 'X'), - (0xFFA1, 'M', 'ᄀ'), - (0xFFA2, 'M', 'ᄁ'), - (0xFFA3, 'M', 'ᆪ'), - (0xFFA4, 'M', 'ᄂ'), - (0xFFA5, 'M', 'ᆬ'), - (0xFFA6, 'M', 'ᆭ'), - (0xFFA7, 'M', 'ᄃ'), - (0xFFA8, 'M', 'ᄄ'), - (0xFFA9, 'M', 'ᄅ'), - (0xFFAA, 'M', 'ᆰ'), - (0xFFAB, 'M', 'ᆱ'), - (0xFFAC, 'M', 'ᆲ'), - (0xFFAD, 'M', 'ᆳ'), - (0xFFAE, 'M', 'ᆴ'), - (0xFFAF, 'M', 'ᆵ'), - (0xFFB0, 'M', 'ᄚ'), - (0xFFB1, 'M', 'ᄆ'), - (0xFFB2, 'M', 'ᄇ'), - (0xFFB3, 'M', 'ᄈ'), - (0xFFB4, 'M', 'ᄡ'), - (0xFFB5, 'M', 'ᄉ'), - (0xFFB6, 'M', 'ᄊ'), - (0xFFB7, 'M', 'ᄋ'), - (0xFFB8, 'M', 'ᄌ'), - (0xFFB9, 'M', 'ᄍ'), - (0xFFBA, 'M', 'ᄎ'), - (0xFFBB, 'M', 'ᄏ'), - (0xFFBC, 'M', 'ᄐ'), - (0xFFBD, 'M', 'ᄑ'), - (0xFFBE, 'M', 'ᄒ'), - (0xFFBF, 'X'), - (0xFFC2, 'M', 'ᅡ'), - (0xFFC3, 'M', 'ᅢ'), - 
(0xFFC4, 'M', 'ᅣ'), - (0xFFC5, 'M', 'ᅤ'), - (0xFFC6, 'M', 'ᅥ'), - (0xFFC7, 'M', 'ᅦ'), - (0xFFC8, 'X'), - (0xFFCA, 'M', 'ᅧ'), - (0xFFCB, 'M', 'ᅨ'), - (0xFFCC, 'M', 'ᅩ'), - (0xFFCD, 'M', 'ᅪ'), - (0xFFCE, 'M', 'ᅫ'), - (0xFFCF, 'M', 'ᅬ'), - (0xFFD0, 'X'), - (0xFFD2, 'M', 'ᅭ'), - (0xFFD3, 'M', 'ᅮ'), - (0xFFD4, 'M', 'ᅯ'), - (0xFFD5, 'M', 'ᅰ'), - (0xFFD6, 'M', 'ᅱ'), - (0xFFD7, 'M', 'ᅲ'), - (0xFFD8, 'X'), - (0xFFDA, 'M', 'ᅳ'), - (0xFFDB, 'M', 'ᅴ'), - (0xFFDC, 'M', 'ᅵ'), - (0xFFDD, 'X'), - (0xFFE0, 'M', '¢'), - (0xFFE1, 'M', '£'), - (0xFFE2, 'M', '¬'), - (0xFFE3, '3', ' ̄'), - (0xFFE4, 'M', '¦'), - (0xFFE5, 'M', '¥'), - (0xFFE6, 'M', '₩'), - (0xFFE7, 'X'), - (0xFFE8, 'M', '│'), - (0xFFE9, 'M', '←'), - ] - -def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFFEA, 'M', '↑'), - (0xFFEB, 'M', '→'), - (0xFFEC, 'M', '↓'), - (0xFFED, 'M', '■'), - (0xFFEE, 'M', '○'), - (0xFFEF, 'X'), - (0x10000, 'V'), - (0x1000C, 'X'), - (0x1000D, 'V'), - (0x10027, 'X'), - (0x10028, 'V'), - (0x1003B, 'X'), - (0x1003C, 'V'), - (0x1003E, 'X'), - (0x1003F, 'V'), - (0x1004E, 'X'), - (0x10050, 'V'), - (0x1005E, 'X'), - (0x10080, 'V'), - (0x100FB, 'X'), - (0x10100, 'V'), - (0x10103, 'X'), - (0x10107, 'V'), - (0x10134, 'X'), - (0x10137, 'V'), - (0x1018F, 'X'), - (0x10190, 'V'), - (0x1019D, 'X'), - (0x101A0, 'V'), - (0x101A1, 'X'), - (0x101D0, 'V'), - (0x101FE, 'X'), - (0x10280, 'V'), - (0x1029D, 'X'), - (0x102A0, 'V'), - (0x102D1, 'X'), - (0x102E0, 'V'), - (0x102FC, 'X'), - (0x10300, 'V'), - (0x10324, 'X'), - (0x1032D, 'V'), - (0x1034B, 'X'), - (0x10350, 'V'), - (0x1037B, 'X'), - (0x10380, 'V'), - (0x1039E, 'X'), - (0x1039F, 'V'), - (0x103C4, 'X'), - (0x103C8, 'V'), - (0x103D6, 'X'), - (0x10400, 'M', '𐐨'), - (0x10401, 'M', '𐐩'), - (0x10402, 'M', '𐐪'), - (0x10403, 'M', '𐐫'), - (0x10404, 'M', '𐐬'), - (0x10405, 'M', '𐐭'), - (0x10406, 'M', '𐐮'), - (0x10407, 'M', '𐐯'), - (0x10408, 'M', '𐐰'), - (0x10409, 'M', '𐐱'), - (0x1040A, 'M', '𐐲'), - (0x1040B, 'M', '𐐳'), - (0x1040C, 'M', '𐐴'), - (0x1040D, 'M', '𐐵'), - (0x1040E, 'M', '𐐶'), - (0x1040F, 'M', '𐐷'), - (0x10410, 'M', '𐐸'), - (0x10411, 'M', '𐐹'), - (0x10412, 'M', '𐐺'), - (0x10413, 'M', '𐐻'), - (0x10414, 'M', '𐐼'), - (0x10415, 'M', '𐐽'), - (0x10416, 'M', '𐐾'), - (0x10417, 'M', '𐐿'), - (0x10418, 'M', '𐑀'), - (0x10419, 'M', '𐑁'), - (0x1041A, 'M', '𐑂'), - (0x1041B, 'M', '𐑃'), - (0x1041C, 'M', '𐑄'), - (0x1041D, 'M', '𐑅'), - (0x1041E, 'M', '𐑆'), - (0x1041F, 'M', '𐑇'), - (0x10420, 'M', '𐑈'), - (0x10421, 'M', '𐑉'), - (0x10422, 'M', '𐑊'), - (0x10423, 'M', '𐑋'), - (0x10424, 'M', '𐑌'), - (0x10425, 'M', '𐑍'), - (0x10426, 'M', '𐑎'), - (0x10427, 'M', '𐑏'), - (0x10428, 'V'), - (0x1049E, 'X'), - (0x104A0, 'V'), - (0x104AA, 'X'), - (0x104B0, 'M', '𐓘'), - (0x104B1, 'M', '𐓙'), - (0x104B2, 'M', '𐓚'), - (0x104B3, 'M', '𐓛'), - (0x104B4, 'M', '𐓜'), - (0x104B5, 'M', '𐓝'), - ] - -def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x104B6, 'M', '𐓞'), - (0x104B7, 'M', '𐓟'), - (0x104B8, 'M', '𐓠'), - (0x104B9, 'M', '𐓡'), - (0x104BA, 'M', '𐓢'), - (0x104BB, 'M', '𐓣'), - (0x104BC, 'M', '𐓤'), - (0x104BD, 'M', '𐓥'), - (0x104BE, 'M', '𐓦'), - (0x104BF, 'M', '𐓧'), - (0x104C0, 'M', '𐓨'), - (0x104C1, 'M', '𐓩'), - (0x104C2, 'M', '𐓪'), - (0x104C3, 'M', '𐓫'), - (0x104C4, 'M', '𐓬'), - (0x104C5, 'M', '𐓭'), - (0x104C6, 'M', '𐓮'), - (0x104C7, 'M', '𐓯'), - (0x104C8, 'M', '𐓰'), - (0x104C9, 'M', '𐓱'), - (0x104CA, 'M', '𐓲'), - (0x104CB, 'M', '𐓳'), - (0x104CC, 'M', '𐓴'), - (0x104CD, 'M', '𐓵'), - (0x104CE, 'M', '𐓶'), - (0x104CF, 'M', '𐓷'), - (0x104D0, 'M', '𐓸'), - 
(0x104D1, 'M', '𐓹'), - (0x104D2, 'M', '𐓺'), - (0x104D3, 'M', '𐓻'), - (0x104D4, 'X'), - (0x104D8, 'V'), - (0x104FC, 'X'), - (0x10500, 'V'), - (0x10528, 'X'), - (0x10530, 'V'), - (0x10564, 'X'), - (0x1056F, 'V'), - (0x10570, 'M', '𐖗'), - (0x10571, 'M', '𐖘'), - (0x10572, 'M', '𐖙'), - (0x10573, 'M', '𐖚'), - (0x10574, 'M', '𐖛'), - (0x10575, 'M', '𐖜'), - (0x10576, 'M', '𐖝'), - (0x10577, 'M', '𐖞'), - (0x10578, 'M', '𐖟'), - (0x10579, 'M', '𐖠'), - (0x1057A, 'M', '𐖡'), - (0x1057B, 'X'), - (0x1057C, 'M', '𐖣'), - (0x1057D, 'M', '𐖤'), - (0x1057E, 'M', '𐖥'), - (0x1057F, 'M', '𐖦'), - (0x10580, 'M', '𐖧'), - (0x10581, 'M', '𐖨'), - (0x10582, 'M', '𐖩'), - (0x10583, 'M', '𐖪'), - (0x10584, 'M', '𐖫'), - (0x10585, 'M', '𐖬'), - (0x10586, 'M', '𐖭'), - (0x10587, 'M', '𐖮'), - (0x10588, 'M', '𐖯'), - (0x10589, 'M', '𐖰'), - (0x1058A, 'M', '𐖱'), - (0x1058B, 'X'), - (0x1058C, 'M', '𐖳'), - (0x1058D, 'M', '𐖴'), - (0x1058E, 'M', '𐖵'), - (0x1058F, 'M', '𐖶'), - (0x10590, 'M', '𐖷'), - (0x10591, 'M', '𐖸'), - (0x10592, 'M', '𐖹'), - (0x10593, 'X'), - (0x10594, 'M', '𐖻'), - (0x10595, 'M', '𐖼'), - (0x10596, 'X'), - (0x10597, 'V'), - (0x105A2, 'X'), - (0x105A3, 'V'), - (0x105B2, 'X'), - (0x105B3, 'V'), - (0x105BA, 'X'), - (0x105BB, 'V'), - (0x105BD, 'X'), - (0x10600, 'V'), - (0x10737, 'X'), - (0x10740, 'V'), - (0x10756, 'X'), - (0x10760, 'V'), - (0x10768, 'X'), - (0x10780, 'V'), - (0x10781, 'M', 'ː'), - (0x10782, 'M', 'ˑ'), - (0x10783, 'M', 'æ'), - (0x10784, 'M', 'ʙ'), - (0x10785, 'M', 'ɓ'), - (0x10786, 'X'), - (0x10787, 'M', 'ʣ'), - (0x10788, 'M', 'ꭦ'), - ] - -def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10789, 'M', 'ʥ'), - (0x1078A, 'M', 'ʤ'), - (0x1078B, 'M', 'ɖ'), - (0x1078C, 'M', 'ɗ'), - (0x1078D, 'M', 'ᶑ'), - (0x1078E, 'M', 'ɘ'), - (0x1078F, 'M', 'ɞ'), - (0x10790, 'M', 'ʩ'), - (0x10791, 'M', 'ɤ'), - (0x10792, 'M', 'ɢ'), - (0x10793, 'M', 'ɠ'), - (0x10794, 'M', 'ʛ'), - (0x10795, 'M', 'ħ'), - (0x10796, 'M', 'ʜ'), - (0x10797, 'M', 'ɧ'), - (0x10798, 'M', 'ʄ'), - (0x10799, 'M', 'ʪ'), - (0x1079A, 'M', 'ʫ'), - (0x1079B, 'M', 'ɬ'), - (0x1079C, 'M', '𝼄'), - (0x1079D, 'M', 'ꞎ'), - (0x1079E, 'M', 'ɮ'), - (0x1079F, 'M', '𝼅'), - (0x107A0, 'M', 'ʎ'), - (0x107A1, 'M', '𝼆'), - (0x107A2, 'M', 'ø'), - (0x107A3, 'M', 'ɶ'), - (0x107A4, 'M', 'ɷ'), - (0x107A5, 'M', 'q'), - (0x107A6, 'M', 'ɺ'), - (0x107A7, 'M', '𝼈'), - (0x107A8, 'M', 'ɽ'), - (0x107A9, 'M', 'ɾ'), - (0x107AA, 'M', 'ʀ'), - (0x107AB, 'M', 'ʨ'), - (0x107AC, 'M', 'ʦ'), - (0x107AD, 'M', 'ꭧ'), - (0x107AE, 'M', 'ʧ'), - (0x107AF, 'M', 'ʈ'), - (0x107B0, 'M', 'ⱱ'), - (0x107B1, 'X'), - (0x107B2, 'M', 'ʏ'), - (0x107B3, 'M', 'ʡ'), - (0x107B4, 'M', 'ʢ'), - (0x107B5, 'M', 'ʘ'), - (0x107B6, 'M', 'ǀ'), - (0x107B7, 'M', 'ǁ'), - (0x107B8, 'M', 'ǂ'), - (0x107B9, 'M', '𝼊'), - (0x107BA, 'M', '𝼞'), - (0x107BB, 'X'), - (0x10800, 'V'), - (0x10806, 'X'), - (0x10808, 'V'), - (0x10809, 'X'), - (0x1080A, 'V'), - (0x10836, 'X'), - (0x10837, 'V'), - (0x10839, 'X'), - (0x1083C, 'V'), - (0x1083D, 'X'), - (0x1083F, 'V'), - (0x10856, 'X'), - (0x10857, 'V'), - (0x1089F, 'X'), - (0x108A7, 'V'), - (0x108B0, 'X'), - (0x108E0, 'V'), - (0x108F3, 'X'), - (0x108F4, 'V'), - (0x108F6, 'X'), - (0x108FB, 'V'), - (0x1091C, 'X'), - (0x1091F, 'V'), - (0x1093A, 'X'), - (0x1093F, 'V'), - (0x10940, 'X'), - (0x10980, 'V'), - (0x109B8, 'X'), - (0x109BC, 'V'), - (0x109D0, 'X'), - (0x109D2, 'V'), - (0x10A04, 'X'), - (0x10A05, 'V'), - (0x10A07, 'X'), - (0x10A0C, 'V'), - (0x10A14, 'X'), - (0x10A15, 'V'), - (0x10A18, 'X'), - (0x10A19, 'V'), - (0x10A36, 'X'), - (0x10A38, 'V'), - (0x10A3B, 'X'), - (0x10A3F, 
'V'), - (0x10A49, 'X'), - (0x10A50, 'V'), - (0x10A59, 'X'), - (0x10A60, 'V'), - (0x10AA0, 'X'), - (0x10AC0, 'V'), - ] - -def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10AE7, 'X'), - (0x10AEB, 'V'), - (0x10AF7, 'X'), - (0x10B00, 'V'), - (0x10B36, 'X'), - (0x10B39, 'V'), - (0x10B56, 'X'), - (0x10B58, 'V'), - (0x10B73, 'X'), - (0x10B78, 'V'), - (0x10B92, 'X'), - (0x10B99, 'V'), - (0x10B9D, 'X'), - (0x10BA9, 'V'), - (0x10BB0, 'X'), - (0x10C00, 'V'), - (0x10C49, 'X'), - (0x10C80, 'M', '𐳀'), - (0x10C81, 'M', '𐳁'), - (0x10C82, 'M', '𐳂'), - (0x10C83, 'M', '𐳃'), - (0x10C84, 'M', '𐳄'), - (0x10C85, 'M', '𐳅'), - (0x10C86, 'M', '𐳆'), - (0x10C87, 'M', '𐳇'), - (0x10C88, 'M', '𐳈'), - (0x10C89, 'M', '𐳉'), - (0x10C8A, 'M', '𐳊'), - (0x10C8B, 'M', '𐳋'), - (0x10C8C, 'M', '𐳌'), - (0x10C8D, 'M', '𐳍'), - (0x10C8E, 'M', '𐳎'), - (0x10C8F, 'M', '𐳏'), - (0x10C90, 'M', '𐳐'), - (0x10C91, 'M', '𐳑'), - (0x10C92, 'M', '𐳒'), - (0x10C93, 'M', '𐳓'), - (0x10C94, 'M', '𐳔'), - (0x10C95, 'M', '𐳕'), - (0x10C96, 'M', '𐳖'), - (0x10C97, 'M', '𐳗'), - (0x10C98, 'M', '𐳘'), - (0x10C99, 'M', '𐳙'), - (0x10C9A, 'M', '𐳚'), - (0x10C9B, 'M', '𐳛'), - (0x10C9C, 'M', '𐳜'), - (0x10C9D, 'M', '𐳝'), - (0x10C9E, 'M', '𐳞'), - (0x10C9F, 'M', '𐳟'), - (0x10CA0, 'M', '𐳠'), - (0x10CA1, 'M', '𐳡'), - (0x10CA2, 'M', '𐳢'), - (0x10CA3, 'M', '𐳣'), - (0x10CA4, 'M', '𐳤'), - (0x10CA5, 'M', '𐳥'), - (0x10CA6, 'M', '𐳦'), - (0x10CA7, 'M', '𐳧'), - (0x10CA8, 'M', '𐳨'), - (0x10CA9, 'M', '𐳩'), - (0x10CAA, 'M', '𐳪'), - (0x10CAB, 'M', '𐳫'), - (0x10CAC, 'M', '𐳬'), - (0x10CAD, 'M', '𐳭'), - (0x10CAE, 'M', '𐳮'), - (0x10CAF, 'M', '𐳯'), - (0x10CB0, 'M', '𐳰'), - (0x10CB1, 'M', '𐳱'), - (0x10CB2, 'M', '𐳲'), - (0x10CB3, 'X'), - (0x10CC0, 'V'), - (0x10CF3, 'X'), - (0x10CFA, 'V'), - (0x10D28, 'X'), - (0x10D30, 'V'), - (0x10D3A, 'X'), - (0x10E60, 'V'), - (0x10E7F, 'X'), - (0x10E80, 'V'), - (0x10EAA, 'X'), - (0x10EAB, 'V'), - (0x10EAE, 'X'), - (0x10EB0, 'V'), - (0x10EB2, 'X'), - (0x10EFD, 'V'), - (0x10F28, 'X'), - (0x10F30, 'V'), - (0x10F5A, 'X'), - (0x10F70, 'V'), - (0x10F8A, 'X'), - (0x10FB0, 'V'), - (0x10FCC, 'X'), - (0x10FE0, 'V'), - (0x10FF7, 'X'), - (0x11000, 'V'), - (0x1104E, 'X'), - (0x11052, 'V'), - (0x11076, 'X'), - (0x1107F, 'V'), - (0x110BD, 'X'), - (0x110BE, 'V'), - ] - -def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x110C3, 'X'), - (0x110D0, 'V'), - (0x110E9, 'X'), - (0x110F0, 'V'), - (0x110FA, 'X'), - (0x11100, 'V'), - (0x11135, 'X'), - (0x11136, 'V'), - (0x11148, 'X'), - (0x11150, 'V'), - (0x11177, 'X'), - (0x11180, 'V'), - (0x111E0, 'X'), - (0x111E1, 'V'), - (0x111F5, 'X'), - (0x11200, 'V'), - (0x11212, 'X'), - (0x11213, 'V'), - (0x11242, 'X'), - (0x11280, 'V'), - (0x11287, 'X'), - (0x11288, 'V'), - (0x11289, 'X'), - (0x1128A, 'V'), - (0x1128E, 'X'), - (0x1128F, 'V'), - (0x1129E, 'X'), - (0x1129F, 'V'), - (0x112AA, 'X'), - (0x112B0, 'V'), - (0x112EB, 'X'), - (0x112F0, 'V'), - (0x112FA, 'X'), - (0x11300, 'V'), - (0x11304, 'X'), - (0x11305, 'V'), - (0x1130D, 'X'), - (0x1130F, 'V'), - (0x11311, 'X'), - (0x11313, 'V'), - (0x11329, 'X'), - (0x1132A, 'V'), - (0x11331, 'X'), - (0x11332, 'V'), - (0x11334, 'X'), - (0x11335, 'V'), - (0x1133A, 'X'), - (0x1133B, 'V'), - (0x11345, 'X'), - (0x11347, 'V'), - (0x11349, 'X'), - (0x1134B, 'V'), - (0x1134E, 'X'), - (0x11350, 'V'), - (0x11351, 'X'), - (0x11357, 'V'), - (0x11358, 'X'), - (0x1135D, 'V'), - (0x11364, 'X'), - (0x11366, 'V'), - (0x1136D, 'X'), - (0x11370, 'V'), - (0x11375, 'X'), - (0x11400, 'V'), - (0x1145C, 'X'), - (0x1145D, 'V'), - (0x11462, 'X'), - 
(0x11480, 'V'), - (0x114C8, 'X'), - (0x114D0, 'V'), - (0x114DA, 'X'), - (0x11580, 'V'), - (0x115B6, 'X'), - (0x115B8, 'V'), - (0x115DE, 'X'), - (0x11600, 'V'), - (0x11645, 'X'), - (0x11650, 'V'), - (0x1165A, 'X'), - (0x11660, 'V'), - (0x1166D, 'X'), - (0x11680, 'V'), - (0x116BA, 'X'), - (0x116C0, 'V'), - (0x116CA, 'X'), - (0x11700, 'V'), - (0x1171B, 'X'), - (0x1171D, 'V'), - (0x1172C, 'X'), - (0x11730, 'V'), - (0x11747, 'X'), - (0x11800, 'V'), - (0x1183C, 'X'), - (0x118A0, 'M', '𑣀'), - (0x118A1, 'M', '𑣁'), - (0x118A2, 'M', '𑣂'), - (0x118A3, 'M', '𑣃'), - (0x118A4, 'M', '𑣄'), - (0x118A5, 'M', '𑣅'), - (0x118A6, 'M', '𑣆'), - ] - -def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x118A7, 'M', '𑣇'), - (0x118A8, 'M', '𑣈'), - (0x118A9, 'M', '𑣉'), - (0x118AA, 'M', '𑣊'), - (0x118AB, 'M', '𑣋'), - (0x118AC, 'M', '𑣌'), - (0x118AD, 'M', '𑣍'), - (0x118AE, 'M', '𑣎'), - (0x118AF, 'M', '𑣏'), - (0x118B0, 'M', '𑣐'), - (0x118B1, 'M', '𑣑'), - (0x118B2, 'M', '𑣒'), - (0x118B3, 'M', '𑣓'), - (0x118B4, 'M', '𑣔'), - (0x118B5, 'M', '𑣕'), - (0x118B6, 'M', '𑣖'), - (0x118B7, 'M', '𑣗'), - (0x118B8, 'M', '𑣘'), - (0x118B9, 'M', '𑣙'), - (0x118BA, 'M', '𑣚'), - (0x118BB, 'M', '𑣛'), - (0x118BC, 'M', '𑣜'), - (0x118BD, 'M', '𑣝'), - (0x118BE, 'M', '𑣞'), - (0x118BF, 'M', '𑣟'), - (0x118C0, 'V'), - (0x118F3, 'X'), - (0x118FF, 'V'), - (0x11907, 'X'), - (0x11909, 'V'), - (0x1190A, 'X'), - (0x1190C, 'V'), - (0x11914, 'X'), - (0x11915, 'V'), - (0x11917, 'X'), - (0x11918, 'V'), - (0x11936, 'X'), - (0x11937, 'V'), - (0x11939, 'X'), - (0x1193B, 'V'), - (0x11947, 'X'), - (0x11950, 'V'), - (0x1195A, 'X'), - (0x119A0, 'V'), - (0x119A8, 'X'), - (0x119AA, 'V'), - (0x119D8, 'X'), - (0x119DA, 'V'), - (0x119E5, 'X'), - (0x11A00, 'V'), - (0x11A48, 'X'), - (0x11A50, 'V'), - (0x11AA3, 'X'), - (0x11AB0, 'V'), - (0x11AF9, 'X'), - (0x11B00, 'V'), - (0x11B0A, 'X'), - (0x11C00, 'V'), - (0x11C09, 'X'), - (0x11C0A, 'V'), - (0x11C37, 'X'), - (0x11C38, 'V'), - (0x11C46, 'X'), - (0x11C50, 'V'), - (0x11C6D, 'X'), - (0x11C70, 'V'), - (0x11C90, 'X'), - (0x11C92, 'V'), - (0x11CA8, 'X'), - (0x11CA9, 'V'), - (0x11CB7, 'X'), - (0x11D00, 'V'), - (0x11D07, 'X'), - (0x11D08, 'V'), - (0x11D0A, 'X'), - (0x11D0B, 'V'), - (0x11D37, 'X'), - (0x11D3A, 'V'), - (0x11D3B, 'X'), - (0x11D3C, 'V'), - (0x11D3E, 'X'), - (0x11D3F, 'V'), - (0x11D48, 'X'), - (0x11D50, 'V'), - (0x11D5A, 'X'), - (0x11D60, 'V'), - (0x11D66, 'X'), - (0x11D67, 'V'), - (0x11D69, 'X'), - (0x11D6A, 'V'), - (0x11D8F, 'X'), - (0x11D90, 'V'), - (0x11D92, 'X'), - (0x11D93, 'V'), - (0x11D99, 'X'), - (0x11DA0, 'V'), - (0x11DAA, 'X'), - (0x11EE0, 'V'), - (0x11EF9, 'X'), - (0x11F00, 'V'), - ] - -def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x11F11, 'X'), - (0x11F12, 'V'), - (0x11F3B, 'X'), - (0x11F3E, 'V'), - (0x11F5A, 'X'), - (0x11FB0, 'V'), - (0x11FB1, 'X'), - (0x11FC0, 'V'), - (0x11FF2, 'X'), - (0x11FFF, 'V'), - (0x1239A, 'X'), - (0x12400, 'V'), - (0x1246F, 'X'), - (0x12470, 'V'), - (0x12475, 'X'), - (0x12480, 'V'), - (0x12544, 'X'), - (0x12F90, 'V'), - (0x12FF3, 'X'), - (0x13000, 'V'), - (0x13430, 'X'), - (0x13440, 'V'), - (0x13456, 'X'), - (0x14400, 'V'), - (0x14647, 'X'), - (0x16800, 'V'), - (0x16A39, 'X'), - (0x16A40, 'V'), - (0x16A5F, 'X'), - (0x16A60, 'V'), - (0x16A6A, 'X'), - (0x16A6E, 'V'), - (0x16ABF, 'X'), - (0x16AC0, 'V'), - (0x16ACA, 'X'), - (0x16AD0, 'V'), - (0x16AEE, 'X'), - (0x16AF0, 'V'), - (0x16AF6, 'X'), - (0x16B00, 'V'), - (0x16B46, 'X'), - (0x16B50, 'V'), - (0x16B5A, 'X'), - (0x16B5B, 'V'), - (0x16B62, 'X'), - (0x16B63, 'V'), 
- (0x16B78, 'X'), - (0x16B7D, 'V'), - (0x16B90, 'X'), - (0x16E40, 'M', '𖹠'), - (0x16E41, 'M', '𖹡'), - (0x16E42, 'M', '𖹢'), - (0x16E43, 'M', '𖹣'), - (0x16E44, 'M', '𖹤'), - (0x16E45, 'M', '𖹥'), - (0x16E46, 'M', '𖹦'), - (0x16E47, 'M', '𖹧'), - (0x16E48, 'M', '𖹨'), - (0x16E49, 'M', '𖹩'), - (0x16E4A, 'M', '𖹪'), - (0x16E4B, 'M', '𖹫'), - (0x16E4C, 'M', '𖹬'), - (0x16E4D, 'M', '𖹭'), - (0x16E4E, 'M', '𖹮'), - (0x16E4F, 'M', '𖹯'), - (0x16E50, 'M', '𖹰'), - (0x16E51, 'M', '𖹱'), - (0x16E52, 'M', '𖹲'), - (0x16E53, 'M', '𖹳'), - (0x16E54, 'M', '𖹴'), - (0x16E55, 'M', '𖹵'), - (0x16E56, 'M', '𖹶'), - (0x16E57, 'M', '𖹷'), - (0x16E58, 'M', '𖹸'), - (0x16E59, 'M', '𖹹'), - (0x16E5A, 'M', '𖹺'), - (0x16E5B, 'M', '𖹻'), - (0x16E5C, 'M', '𖹼'), - (0x16E5D, 'M', '𖹽'), - (0x16E5E, 'M', '𖹾'), - (0x16E5F, 'M', '𖹿'), - (0x16E60, 'V'), - (0x16E9B, 'X'), - (0x16F00, 'V'), - (0x16F4B, 'X'), - (0x16F4F, 'V'), - (0x16F88, 'X'), - (0x16F8F, 'V'), - (0x16FA0, 'X'), - (0x16FE0, 'V'), - (0x16FE5, 'X'), - (0x16FF0, 'V'), - (0x16FF2, 'X'), - (0x17000, 'V'), - (0x187F8, 'X'), - (0x18800, 'V'), - (0x18CD6, 'X'), - (0x18D00, 'V'), - (0x18D09, 'X'), - (0x1AFF0, 'V'), - ] - -def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1AFF4, 'X'), - (0x1AFF5, 'V'), - (0x1AFFC, 'X'), - (0x1AFFD, 'V'), - (0x1AFFF, 'X'), - (0x1B000, 'V'), - (0x1B123, 'X'), - (0x1B132, 'V'), - (0x1B133, 'X'), - (0x1B150, 'V'), - (0x1B153, 'X'), - (0x1B155, 'V'), - (0x1B156, 'X'), - (0x1B164, 'V'), - (0x1B168, 'X'), - (0x1B170, 'V'), - (0x1B2FC, 'X'), - (0x1BC00, 'V'), - (0x1BC6B, 'X'), - (0x1BC70, 'V'), - (0x1BC7D, 'X'), - (0x1BC80, 'V'), - (0x1BC89, 'X'), - (0x1BC90, 'V'), - (0x1BC9A, 'X'), - (0x1BC9C, 'V'), - (0x1BCA0, 'I'), - (0x1BCA4, 'X'), - (0x1CF00, 'V'), - (0x1CF2E, 'X'), - (0x1CF30, 'V'), - (0x1CF47, 'X'), - (0x1CF50, 'V'), - (0x1CFC4, 'X'), - (0x1D000, 'V'), - (0x1D0F6, 'X'), - (0x1D100, 'V'), - (0x1D127, 'X'), - (0x1D129, 'V'), - (0x1D15E, 'M', '𝅗𝅥'), - (0x1D15F, 'M', '𝅘𝅥'), - (0x1D160, 'M', '𝅘𝅥𝅮'), - (0x1D161, 'M', '𝅘𝅥𝅯'), - (0x1D162, 'M', '𝅘𝅥𝅰'), - (0x1D163, 'M', '𝅘𝅥𝅱'), - (0x1D164, 'M', '𝅘𝅥𝅲'), - (0x1D165, 'V'), - (0x1D173, 'X'), - (0x1D17B, 'V'), - (0x1D1BB, 'M', '𝆹𝅥'), - (0x1D1BC, 'M', '𝆺𝅥'), - (0x1D1BD, 'M', '𝆹𝅥𝅮'), - (0x1D1BE, 'M', '𝆺𝅥𝅮'), - (0x1D1BF, 'M', '𝆹𝅥𝅯'), - (0x1D1C0, 'M', '𝆺𝅥𝅯'), - (0x1D1C1, 'V'), - (0x1D1EB, 'X'), - (0x1D200, 'V'), - (0x1D246, 'X'), - (0x1D2C0, 'V'), - (0x1D2D4, 'X'), - (0x1D2E0, 'V'), - (0x1D2F4, 'X'), - (0x1D300, 'V'), - (0x1D357, 'X'), - (0x1D360, 'V'), - (0x1D379, 'X'), - (0x1D400, 'M', 'a'), - (0x1D401, 'M', 'b'), - (0x1D402, 'M', 'c'), - (0x1D403, 'M', 'd'), - (0x1D404, 'M', 'e'), - (0x1D405, 'M', 'f'), - (0x1D406, 'M', 'g'), - (0x1D407, 'M', 'h'), - (0x1D408, 'M', 'i'), - (0x1D409, 'M', 'j'), - (0x1D40A, 'M', 'k'), - (0x1D40B, 'M', 'l'), - (0x1D40C, 'M', 'm'), - (0x1D40D, 'M', 'n'), - (0x1D40E, 'M', 'o'), - (0x1D40F, 'M', 'p'), - (0x1D410, 'M', 'q'), - (0x1D411, 'M', 'r'), - (0x1D412, 'M', 's'), - (0x1D413, 'M', 't'), - (0x1D414, 'M', 'u'), - (0x1D415, 'M', 'v'), - (0x1D416, 'M', 'w'), - (0x1D417, 'M', 'x'), - (0x1D418, 'M', 'y'), - (0x1D419, 'M', 'z'), - (0x1D41A, 'M', 'a'), - (0x1D41B, 'M', 'b'), - (0x1D41C, 'M', 'c'), - (0x1D41D, 'M', 'd'), - (0x1D41E, 'M', 'e'), - (0x1D41F, 'M', 'f'), - (0x1D420, 'M', 'g'), - ] - -def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D421, 'M', 'h'), - (0x1D422, 'M', 'i'), - (0x1D423, 'M', 'j'), - (0x1D424, 'M', 'k'), - (0x1D425, 'M', 'l'), - (0x1D426, 'M', 'm'), - (0x1D427, 'M', 'n'), - (0x1D428, 'M', 'o'), - (0x1D429, 
'M', 'p'), - (0x1D42A, 'M', 'q'), - (0x1D42B, 'M', 'r'), - (0x1D42C, 'M', 's'), - (0x1D42D, 'M', 't'), - (0x1D42E, 'M', 'u'), - (0x1D42F, 'M', 'v'), - (0x1D430, 'M', 'w'), - (0x1D431, 'M', 'x'), - (0x1D432, 'M', 'y'), - (0x1D433, 'M', 'z'), - (0x1D434, 'M', 'a'), - (0x1D435, 'M', 'b'), - (0x1D436, 'M', 'c'), - (0x1D437, 'M', 'd'), - (0x1D438, 'M', 'e'), - (0x1D439, 'M', 'f'), - (0x1D43A, 'M', 'g'), - (0x1D43B, 'M', 'h'), - (0x1D43C, 'M', 'i'), - (0x1D43D, 'M', 'j'), - (0x1D43E, 'M', 'k'), - (0x1D43F, 'M', 'l'), - (0x1D440, 'M', 'm'), - (0x1D441, 'M', 'n'), - (0x1D442, 'M', 'o'), - (0x1D443, 'M', 'p'), - (0x1D444, 'M', 'q'), - (0x1D445, 'M', 'r'), - (0x1D446, 'M', 's'), - (0x1D447, 'M', 't'), - (0x1D448, 'M', 'u'), - (0x1D449, 'M', 'v'), - (0x1D44A, 'M', 'w'), - (0x1D44B, 'M', 'x'), - (0x1D44C, 'M', 'y'), - (0x1D44D, 'M', 'z'), - (0x1D44E, 'M', 'a'), - (0x1D44F, 'M', 'b'), - (0x1D450, 'M', 'c'), - (0x1D451, 'M', 'd'), - (0x1D452, 'M', 'e'), - (0x1D453, 'M', 'f'), - (0x1D454, 'M', 'g'), - (0x1D455, 'X'), - (0x1D456, 'M', 'i'), - (0x1D457, 'M', 'j'), - (0x1D458, 'M', 'k'), - (0x1D459, 'M', 'l'), - (0x1D45A, 'M', 'm'), - (0x1D45B, 'M', 'n'), - (0x1D45C, 'M', 'o'), - (0x1D45D, 'M', 'p'), - (0x1D45E, 'M', 'q'), - (0x1D45F, 'M', 'r'), - (0x1D460, 'M', 's'), - (0x1D461, 'M', 't'), - (0x1D462, 'M', 'u'), - (0x1D463, 'M', 'v'), - (0x1D464, 'M', 'w'), - (0x1D465, 'M', 'x'), - (0x1D466, 'M', 'y'), - (0x1D467, 'M', 'z'), - (0x1D468, 'M', 'a'), - (0x1D469, 'M', 'b'), - (0x1D46A, 'M', 'c'), - (0x1D46B, 'M', 'd'), - (0x1D46C, 'M', 'e'), - (0x1D46D, 'M', 'f'), - (0x1D46E, 'M', 'g'), - (0x1D46F, 'M', 'h'), - (0x1D470, 'M', 'i'), - (0x1D471, 'M', 'j'), - (0x1D472, 'M', 'k'), - (0x1D473, 'M', 'l'), - (0x1D474, 'M', 'm'), - (0x1D475, 'M', 'n'), - (0x1D476, 'M', 'o'), - (0x1D477, 'M', 'p'), - (0x1D478, 'M', 'q'), - (0x1D479, 'M', 'r'), - (0x1D47A, 'M', 's'), - (0x1D47B, 'M', 't'), - (0x1D47C, 'M', 'u'), - (0x1D47D, 'M', 'v'), - (0x1D47E, 'M', 'w'), - (0x1D47F, 'M', 'x'), - (0x1D480, 'M', 'y'), - (0x1D481, 'M', 'z'), - (0x1D482, 'M', 'a'), - (0x1D483, 'M', 'b'), - (0x1D484, 'M', 'c'), - ] - -def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D485, 'M', 'd'), - (0x1D486, 'M', 'e'), - (0x1D487, 'M', 'f'), - (0x1D488, 'M', 'g'), - (0x1D489, 'M', 'h'), - (0x1D48A, 'M', 'i'), - (0x1D48B, 'M', 'j'), - (0x1D48C, 'M', 'k'), - (0x1D48D, 'M', 'l'), - (0x1D48E, 'M', 'm'), - (0x1D48F, 'M', 'n'), - (0x1D490, 'M', 'o'), - (0x1D491, 'M', 'p'), - (0x1D492, 'M', 'q'), - (0x1D493, 'M', 'r'), - (0x1D494, 'M', 's'), - (0x1D495, 'M', 't'), - (0x1D496, 'M', 'u'), - (0x1D497, 'M', 'v'), - (0x1D498, 'M', 'w'), - (0x1D499, 'M', 'x'), - (0x1D49A, 'M', 'y'), - (0x1D49B, 'M', 'z'), - (0x1D49C, 'M', 'a'), - (0x1D49D, 'X'), - (0x1D49E, 'M', 'c'), - (0x1D49F, 'M', 'd'), - (0x1D4A0, 'X'), - (0x1D4A2, 'M', 'g'), - (0x1D4A3, 'X'), - (0x1D4A5, 'M', 'j'), - (0x1D4A6, 'M', 'k'), - (0x1D4A7, 'X'), - (0x1D4A9, 'M', 'n'), - (0x1D4AA, 'M', 'o'), - (0x1D4AB, 'M', 'p'), - (0x1D4AC, 'M', 'q'), - (0x1D4AD, 'X'), - (0x1D4AE, 'M', 's'), - (0x1D4AF, 'M', 't'), - (0x1D4B0, 'M', 'u'), - (0x1D4B1, 'M', 'v'), - (0x1D4B2, 'M', 'w'), - (0x1D4B3, 'M', 'x'), - (0x1D4B4, 'M', 'y'), - (0x1D4B5, 'M', 'z'), - (0x1D4B6, 'M', 'a'), - (0x1D4B7, 'M', 'b'), - (0x1D4B8, 'M', 'c'), - (0x1D4B9, 'M', 'd'), - (0x1D4BA, 'X'), - (0x1D4BB, 'M', 'f'), - (0x1D4BC, 'X'), - (0x1D4BD, 'M', 'h'), - (0x1D4BE, 'M', 'i'), - (0x1D4BF, 'M', 'j'), - (0x1D4C0, 'M', 'k'), - (0x1D4C1, 'M', 'l'), - (0x1D4C2, 'M', 'm'), - (0x1D4C3, 'M', 'n'), - (0x1D4C4, 'X'), - 
(0x1D4C5, 'M', 'p'), - (0x1D4C6, 'M', 'q'), - (0x1D4C7, 'M', 'r'), - (0x1D4C8, 'M', 's'), - (0x1D4C9, 'M', 't'), - (0x1D4CA, 'M', 'u'), - (0x1D4CB, 'M', 'v'), - (0x1D4CC, 'M', 'w'), - (0x1D4CD, 'M', 'x'), - (0x1D4CE, 'M', 'y'), - (0x1D4CF, 'M', 'z'), - (0x1D4D0, 'M', 'a'), - (0x1D4D1, 'M', 'b'), - (0x1D4D2, 'M', 'c'), - (0x1D4D3, 'M', 'd'), - (0x1D4D4, 'M', 'e'), - (0x1D4D5, 'M', 'f'), - (0x1D4D6, 'M', 'g'), - (0x1D4D7, 'M', 'h'), - (0x1D4D8, 'M', 'i'), - (0x1D4D9, 'M', 'j'), - (0x1D4DA, 'M', 'k'), - (0x1D4DB, 'M', 'l'), - (0x1D4DC, 'M', 'm'), - (0x1D4DD, 'M', 'n'), - (0x1D4DE, 'M', 'o'), - (0x1D4DF, 'M', 'p'), - (0x1D4E0, 'M', 'q'), - (0x1D4E1, 'M', 'r'), - (0x1D4E2, 'M', 's'), - (0x1D4E3, 'M', 't'), - (0x1D4E4, 'M', 'u'), - (0x1D4E5, 'M', 'v'), - (0x1D4E6, 'M', 'w'), - (0x1D4E7, 'M', 'x'), - (0x1D4E8, 'M', 'y'), - (0x1D4E9, 'M', 'z'), - (0x1D4EA, 'M', 'a'), - (0x1D4EB, 'M', 'b'), - ] - -def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D4EC, 'M', 'c'), - (0x1D4ED, 'M', 'd'), - (0x1D4EE, 'M', 'e'), - (0x1D4EF, 'M', 'f'), - (0x1D4F0, 'M', 'g'), - (0x1D4F1, 'M', 'h'), - (0x1D4F2, 'M', 'i'), - (0x1D4F3, 'M', 'j'), - (0x1D4F4, 'M', 'k'), - (0x1D4F5, 'M', 'l'), - (0x1D4F6, 'M', 'm'), - (0x1D4F7, 'M', 'n'), - (0x1D4F8, 'M', 'o'), - (0x1D4F9, 'M', 'p'), - (0x1D4FA, 'M', 'q'), - (0x1D4FB, 'M', 'r'), - (0x1D4FC, 'M', 's'), - (0x1D4FD, 'M', 't'), - (0x1D4FE, 'M', 'u'), - (0x1D4FF, 'M', 'v'), - (0x1D500, 'M', 'w'), - (0x1D501, 'M', 'x'), - (0x1D502, 'M', 'y'), - (0x1D503, 'M', 'z'), - (0x1D504, 'M', 'a'), - (0x1D505, 'M', 'b'), - (0x1D506, 'X'), - (0x1D507, 'M', 'd'), - (0x1D508, 'M', 'e'), - (0x1D509, 'M', 'f'), - (0x1D50A, 'M', 'g'), - (0x1D50B, 'X'), - (0x1D50D, 'M', 'j'), - (0x1D50E, 'M', 'k'), - (0x1D50F, 'M', 'l'), - (0x1D510, 'M', 'm'), - (0x1D511, 'M', 'n'), - (0x1D512, 'M', 'o'), - (0x1D513, 'M', 'p'), - (0x1D514, 'M', 'q'), - (0x1D515, 'X'), - (0x1D516, 'M', 's'), - (0x1D517, 'M', 't'), - (0x1D518, 'M', 'u'), - (0x1D519, 'M', 'v'), - (0x1D51A, 'M', 'w'), - (0x1D51B, 'M', 'x'), - (0x1D51C, 'M', 'y'), - (0x1D51D, 'X'), - (0x1D51E, 'M', 'a'), - (0x1D51F, 'M', 'b'), - (0x1D520, 'M', 'c'), - (0x1D521, 'M', 'd'), - (0x1D522, 'M', 'e'), - (0x1D523, 'M', 'f'), - (0x1D524, 'M', 'g'), - (0x1D525, 'M', 'h'), - (0x1D526, 'M', 'i'), - (0x1D527, 'M', 'j'), - (0x1D528, 'M', 'k'), - (0x1D529, 'M', 'l'), - (0x1D52A, 'M', 'm'), - (0x1D52B, 'M', 'n'), - (0x1D52C, 'M', 'o'), - (0x1D52D, 'M', 'p'), - (0x1D52E, 'M', 'q'), - (0x1D52F, 'M', 'r'), - (0x1D530, 'M', 's'), - (0x1D531, 'M', 't'), - (0x1D532, 'M', 'u'), - (0x1D533, 'M', 'v'), - (0x1D534, 'M', 'w'), - (0x1D535, 'M', 'x'), - (0x1D536, 'M', 'y'), - (0x1D537, 'M', 'z'), - (0x1D538, 'M', 'a'), - (0x1D539, 'M', 'b'), - (0x1D53A, 'X'), - (0x1D53B, 'M', 'd'), - (0x1D53C, 'M', 'e'), - (0x1D53D, 'M', 'f'), - (0x1D53E, 'M', 'g'), - (0x1D53F, 'X'), - (0x1D540, 'M', 'i'), - (0x1D541, 'M', 'j'), - (0x1D542, 'M', 'k'), - (0x1D543, 'M', 'l'), - (0x1D544, 'M', 'm'), - (0x1D545, 'X'), - (0x1D546, 'M', 'o'), - (0x1D547, 'X'), - (0x1D54A, 'M', 's'), - (0x1D54B, 'M', 't'), - (0x1D54C, 'M', 'u'), - (0x1D54D, 'M', 'v'), - (0x1D54E, 'M', 'w'), - (0x1D54F, 'M', 'x'), - (0x1D550, 'M', 'y'), - (0x1D551, 'X'), - (0x1D552, 'M', 'a'), - ] - -def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D553, 'M', 'b'), - (0x1D554, 'M', 'c'), - (0x1D555, 'M', 'd'), - (0x1D556, 'M', 'e'), - (0x1D557, 'M', 'f'), - (0x1D558, 'M', 'g'), - (0x1D559, 'M', 'h'), - (0x1D55A, 'M', 'i'), - (0x1D55B, 'M', 'j'), - (0x1D55C, 'M', 'k'), 
- (0x1D55D, 'M', 'l'), - (0x1D55E, 'M', 'm'), - (0x1D55F, 'M', 'n'), - (0x1D560, 'M', 'o'), - (0x1D561, 'M', 'p'), - (0x1D562, 'M', 'q'), - (0x1D563, 'M', 'r'), - (0x1D564, 'M', 's'), - (0x1D565, 'M', 't'), - (0x1D566, 'M', 'u'), - (0x1D567, 'M', 'v'), - (0x1D568, 'M', 'w'), - (0x1D569, 'M', 'x'), - (0x1D56A, 'M', 'y'), - (0x1D56B, 'M', 'z'), - (0x1D56C, 'M', 'a'), - (0x1D56D, 'M', 'b'), - (0x1D56E, 'M', 'c'), - (0x1D56F, 'M', 'd'), - (0x1D570, 'M', 'e'), - (0x1D571, 'M', 'f'), - (0x1D572, 'M', 'g'), - (0x1D573, 'M', 'h'), - (0x1D574, 'M', 'i'), - (0x1D575, 'M', 'j'), - (0x1D576, 'M', 'k'), - (0x1D577, 'M', 'l'), - (0x1D578, 'M', 'm'), - (0x1D579, 'M', 'n'), - (0x1D57A, 'M', 'o'), - (0x1D57B, 'M', 'p'), - (0x1D57C, 'M', 'q'), - (0x1D57D, 'M', 'r'), - (0x1D57E, 'M', 's'), - (0x1D57F, 'M', 't'), - (0x1D580, 'M', 'u'), - (0x1D581, 'M', 'v'), - (0x1D582, 'M', 'w'), - (0x1D583, 'M', 'x'), - (0x1D584, 'M', 'y'), - (0x1D585, 'M', 'z'), - (0x1D586, 'M', 'a'), - (0x1D587, 'M', 'b'), - (0x1D588, 'M', 'c'), - (0x1D589, 'M', 'd'), - (0x1D58A, 'M', 'e'), - (0x1D58B, 'M', 'f'), - (0x1D58C, 'M', 'g'), - (0x1D58D, 'M', 'h'), - (0x1D58E, 'M', 'i'), - (0x1D58F, 'M', 'j'), - (0x1D590, 'M', 'k'), - (0x1D591, 'M', 'l'), - (0x1D592, 'M', 'm'), - (0x1D593, 'M', 'n'), - (0x1D594, 'M', 'o'), - (0x1D595, 'M', 'p'), - (0x1D596, 'M', 'q'), - (0x1D597, 'M', 'r'), - (0x1D598, 'M', 's'), - (0x1D599, 'M', 't'), - (0x1D59A, 'M', 'u'), - (0x1D59B, 'M', 'v'), - (0x1D59C, 'M', 'w'), - (0x1D59D, 'M', 'x'), - (0x1D59E, 'M', 'y'), - (0x1D59F, 'M', 'z'), - (0x1D5A0, 'M', 'a'), - (0x1D5A1, 'M', 'b'), - (0x1D5A2, 'M', 'c'), - (0x1D5A3, 'M', 'd'), - (0x1D5A4, 'M', 'e'), - (0x1D5A5, 'M', 'f'), - (0x1D5A6, 'M', 'g'), - (0x1D5A7, 'M', 'h'), - (0x1D5A8, 'M', 'i'), - (0x1D5A9, 'M', 'j'), - (0x1D5AA, 'M', 'k'), - (0x1D5AB, 'M', 'l'), - (0x1D5AC, 'M', 'm'), - (0x1D5AD, 'M', 'n'), - (0x1D5AE, 'M', 'o'), - (0x1D5AF, 'M', 'p'), - (0x1D5B0, 'M', 'q'), - (0x1D5B1, 'M', 'r'), - (0x1D5B2, 'M', 's'), - (0x1D5B3, 'M', 't'), - (0x1D5B4, 'M', 'u'), - (0x1D5B5, 'M', 'v'), - (0x1D5B6, 'M', 'w'), - ] - -def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D5B7, 'M', 'x'), - (0x1D5B8, 'M', 'y'), - (0x1D5B9, 'M', 'z'), - (0x1D5BA, 'M', 'a'), - (0x1D5BB, 'M', 'b'), - (0x1D5BC, 'M', 'c'), - (0x1D5BD, 'M', 'd'), - (0x1D5BE, 'M', 'e'), - (0x1D5BF, 'M', 'f'), - (0x1D5C0, 'M', 'g'), - (0x1D5C1, 'M', 'h'), - (0x1D5C2, 'M', 'i'), - (0x1D5C3, 'M', 'j'), - (0x1D5C4, 'M', 'k'), - (0x1D5C5, 'M', 'l'), - (0x1D5C6, 'M', 'm'), - (0x1D5C7, 'M', 'n'), - (0x1D5C8, 'M', 'o'), - (0x1D5C9, 'M', 'p'), - (0x1D5CA, 'M', 'q'), - (0x1D5CB, 'M', 'r'), - (0x1D5CC, 'M', 's'), - (0x1D5CD, 'M', 't'), - (0x1D5CE, 'M', 'u'), - (0x1D5CF, 'M', 'v'), - (0x1D5D0, 'M', 'w'), - (0x1D5D1, 'M', 'x'), - (0x1D5D2, 'M', 'y'), - (0x1D5D3, 'M', 'z'), - (0x1D5D4, 'M', 'a'), - (0x1D5D5, 'M', 'b'), - (0x1D5D6, 'M', 'c'), - (0x1D5D7, 'M', 'd'), - (0x1D5D8, 'M', 'e'), - (0x1D5D9, 'M', 'f'), - (0x1D5DA, 'M', 'g'), - (0x1D5DB, 'M', 'h'), - (0x1D5DC, 'M', 'i'), - (0x1D5DD, 'M', 'j'), - (0x1D5DE, 'M', 'k'), - (0x1D5DF, 'M', 'l'), - (0x1D5E0, 'M', 'm'), - (0x1D5E1, 'M', 'n'), - (0x1D5E2, 'M', 'o'), - (0x1D5E3, 'M', 'p'), - (0x1D5E4, 'M', 'q'), - (0x1D5E5, 'M', 'r'), - (0x1D5E6, 'M', 's'), - (0x1D5E7, 'M', 't'), - (0x1D5E8, 'M', 'u'), - (0x1D5E9, 'M', 'v'), - (0x1D5EA, 'M', 'w'), - (0x1D5EB, 'M', 'x'), - (0x1D5EC, 'M', 'y'), - (0x1D5ED, 'M', 'z'), - (0x1D5EE, 'M', 'a'), - (0x1D5EF, 'M', 'b'), - (0x1D5F0, 'M', 'c'), - (0x1D5F1, 'M', 'd'), - (0x1D5F2, 'M', 'e'), - (0x1D5F3, 'M', 
'f'), - (0x1D5F4, 'M', 'g'), - (0x1D5F5, 'M', 'h'), - (0x1D5F6, 'M', 'i'), - (0x1D5F7, 'M', 'j'), - (0x1D5F8, 'M', 'k'), - (0x1D5F9, 'M', 'l'), - (0x1D5FA, 'M', 'm'), - (0x1D5FB, 'M', 'n'), - (0x1D5FC, 'M', 'o'), - (0x1D5FD, 'M', 'p'), - (0x1D5FE, 'M', 'q'), - (0x1D5FF, 'M', 'r'), - (0x1D600, 'M', 's'), - (0x1D601, 'M', 't'), - (0x1D602, 'M', 'u'), - (0x1D603, 'M', 'v'), - (0x1D604, 'M', 'w'), - (0x1D605, 'M', 'x'), - (0x1D606, 'M', 'y'), - (0x1D607, 'M', 'z'), - (0x1D608, 'M', 'a'), - (0x1D609, 'M', 'b'), - (0x1D60A, 'M', 'c'), - (0x1D60B, 'M', 'd'), - (0x1D60C, 'M', 'e'), - (0x1D60D, 'M', 'f'), - (0x1D60E, 'M', 'g'), - (0x1D60F, 'M', 'h'), - (0x1D610, 'M', 'i'), - (0x1D611, 'M', 'j'), - (0x1D612, 'M', 'k'), - (0x1D613, 'M', 'l'), - (0x1D614, 'M', 'm'), - (0x1D615, 'M', 'n'), - (0x1D616, 'M', 'o'), - (0x1D617, 'M', 'p'), - (0x1D618, 'M', 'q'), - (0x1D619, 'M', 'r'), - (0x1D61A, 'M', 's'), - ] - -def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D61B, 'M', 't'), - (0x1D61C, 'M', 'u'), - (0x1D61D, 'M', 'v'), - (0x1D61E, 'M', 'w'), - (0x1D61F, 'M', 'x'), - (0x1D620, 'M', 'y'), - (0x1D621, 'M', 'z'), - (0x1D622, 'M', 'a'), - (0x1D623, 'M', 'b'), - (0x1D624, 'M', 'c'), - (0x1D625, 'M', 'd'), - (0x1D626, 'M', 'e'), - (0x1D627, 'M', 'f'), - (0x1D628, 'M', 'g'), - (0x1D629, 'M', 'h'), - (0x1D62A, 'M', 'i'), - (0x1D62B, 'M', 'j'), - (0x1D62C, 'M', 'k'), - (0x1D62D, 'M', 'l'), - (0x1D62E, 'M', 'm'), - (0x1D62F, 'M', 'n'), - (0x1D630, 'M', 'o'), - (0x1D631, 'M', 'p'), - (0x1D632, 'M', 'q'), - (0x1D633, 'M', 'r'), - (0x1D634, 'M', 's'), - (0x1D635, 'M', 't'), - (0x1D636, 'M', 'u'), - (0x1D637, 'M', 'v'), - (0x1D638, 'M', 'w'), - (0x1D639, 'M', 'x'), - (0x1D63A, 'M', 'y'), - (0x1D63B, 'M', 'z'), - (0x1D63C, 'M', 'a'), - (0x1D63D, 'M', 'b'), - (0x1D63E, 'M', 'c'), - (0x1D63F, 'M', 'd'), - (0x1D640, 'M', 'e'), - (0x1D641, 'M', 'f'), - (0x1D642, 'M', 'g'), - (0x1D643, 'M', 'h'), - (0x1D644, 'M', 'i'), - (0x1D645, 'M', 'j'), - (0x1D646, 'M', 'k'), - (0x1D647, 'M', 'l'), - (0x1D648, 'M', 'm'), - (0x1D649, 'M', 'n'), - (0x1D64A, 'M', 'o'), - (0x1D64B, 'M', 'p'), - (0x1D64C, 'M', 'q'), - (0x1D64D, 'M', 'r'), - (0x1D64E, 'M', 's'), - (0x1D64F, 'M', 't'), - (0x1D650, 'M', 'u'), - (0x1D651, 'M', 'v'), - (0x1D652, 'M', 'w'), - (0x1D653, 'M', 'x'), - (0x1D654, 'M', 'y'), - (0x1D655, 'M', 'z'), - (0x1D656, 'M', 'a'), - (0x1D657, 'M', 'b'), - (0x1D658, 'M', 'c'), - (0x1D659, 'M', 'd'), - (0x1D65A, 'M', 'e'), - (0x1D65B, 'M', 'f'), - (0x1D65C, 'M', 'g'), - (0x1D65D, 'M', 'h'), - (0x1D65E, 'M', 'i'), - (0x1D65F, 'M', 'j'), - (0x1D660, 'M', 'k'), - (0x1D661, 'M', 'l'), - (0x1D662, 'M', 'm'), - (0x1D663, 'M', 'n'), - (0x1D664, 'M', 'o'), - (0x1D665, 'M', 'p'), - (0x1D666, 'M', 'q'), - (0x1D667, 'M', 'r'), - (0x1D668, 'M', 's'), - (0x1D669, 'M', 't'), - (0x1D66A, 'M', 'u'), - (0x1D66B, 'M', 'v'), - (0x1D66C, 'M', 'w'), - (0x1D66D, 'M', 'x'), - (0x1D66E, 'M', 'y'), - (0x1D66F, 'M', 'z'), - (0x1D670, 'M', 'a'), - (0x1D671, 'M', 'b'), - (0x1D672, 'M', 'c'), - (0x1D673, 'M', 'd'), - (0x1D674, 'M', 'e'), - (0x1D675, 'M', 'f'), - (0x1D676, 'M', 'g'), - (0x1D677, 'M', 'h'), - (0x1D678, 'M', 'i'), - (0x1D679, 'M', 'j'), - (0x1D67A, 'M', 'k'), - (0x1D67B, 'M', 'l'), - (0x1D67C, 'M', 'm'), - (0x1D67D, 'M', 'n'), - (0x1D67E, 'M', 'o'), - ] - -def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D67F, 'M', 'p'), - (0x1D680, 'M', 'q'), - (0x1D681, 'M', 'r'), - (0x1D682, 'M', 's'), - (0x1D683, 'M', 't'), - (0x1D684, 'M', 'u'), - (0x1D685, 'M', 'v'), - (0x1D686, 'M', 
'w'), - (0x1D687, 'M', 'x'), - (0x1D688, 'M', 'y'), - (0x1D689, 'M', 'z'), - (0x1D68A, 'M', 'a'), - (0x1D68B, 'M', 'b'), - (0x1D68C, 'M', 'c'), - (0x1D68D, 'M', 'd'), - (0x1D68E, 'M', 'e'), - (0x1D68F, 'M', 'f'), - (0x1D690, 'M', 'g'), - (0x1D691, 'M', 'h'), - (0x1D692, 'M', 'i'), - (0x1D693, 'M', 'j'), - (0x1D694, 'M', 'k'), - (0x1D695, 'M', 'l'), - (0x1D696, 'M', 'm'), - (0x1D697, 'M', 'n'), - (0x1D698, 'M', 'o'), - (0x1D699, 'M', 'p'), - (0x1D69A, 'M', 'q'), - (0x1D69B, 'M', 'r'), - (0x1D69C, 'M', 's'), - (0x1D69D, 'M', 't'), - (0x1D69E, 'M', 'u'), - (0x1D69F, 'M', 'v'), - (0x1D6A0, 'M', 'w'), - (0x1D6A1, 'M', 'x'), - (0x1D6A2, 'M', 'y'), - (0x1D6A3, 'M', 'z'), - (0x1D6A4, 'M', 'ı'), - (0x1D6A5, 'M', 'ȷ'), - (0x1D6A6, 'X'), - (0x1D6A8, 'M', 'α'), - (0x1D6A9, 'M', 'β'), - (0x1D6AA, 'M', 'γ'), - (0x1D6AB, 'M', 'δ'), - (0x1D6AC, 'M', 'ε'), - (0x1D6AD, 'M', 'ζ'), - (0x1D6AE, 'M', 'η'), - (0x1D6AF, 'M', 'θ'), - (0x1D6B0, 'M', 'ι'), - (0x1D6B1, 'M', 'κ'), - (0x1D6B2, 'M', 'λ'), - (0x1D6B3, 'M', 'μ'), - (0x1D6B4, 'M', 'ν'), - (0x1D6B5, 'M', 'ξ'), - (0x1D6B6, 'M', 'ο'), - (0x1D6B7, 'M', 'π'), - (0x1D6B8, 'M', 'ρ'), - (0x1D6B9, 'M', 'θ'), - (0x1D6BA, 'M', 'σ'), - (0x1D6BB, 'M', 'τ'), - (0x1D6BC, 'M', 'υ'), - (0x1D6BD, 'M', 'φ'), - (0x1D6BE, 'M', 'χ'), - (0x1D6BF, 'M', 'ψ'), - (0x1D6C0, 'M', 'ω'), - (0x1D6C1, 'M', '∇'), - (0x1D6C2, 'M', 'α'), - (0x1D6C3, 'M', 'β'), - (0x1D6C4, 'M', 'γ'), - (0x1D6C5, 'M', 'δ'), - (0x1D6C6, 'M', 'ε'), - (0x1D6C7, 'M', 'ζ'), - (0x1D6C8, 'M', 'η'), - (0x1D6C9, 'M', 'θ'), - (0x1D6CA, 'M', 'ι'), - (0x1D6CB, 'M', 'κ'), - (0x1D6CC, 'M', 'λ'), - (0x1D6CD, 'M', 'μ'), - (0x1D6CE, 'M', 'ν'), - (0x1D6CF, 'M', 'ξ'), - (0x1D6D0, 'M', 'ο'), - (0x1D6D1, 'M', 'π'), - (0x1D6D2, 'M', 'ρ'), - (0x1D6D3, 'M', 'σ'), - (0x1D6D5, 'M', 'τ'), - (0x1D6D6, 'M', 'υ'), - (0x1D6D7, 'M', 'φ'), - (0x1D6D8, 'M', 'χ'), - (0x1D6D9, 'M', 'ψ'), - (0x1D6DA, 'M', 'ω'), - (0x1D6DB, 'M', '∂'), - (0x1D6DC, 'M', 'ε'), - (0x1D6DD, 'M', 'θ'), - (0x1D6DE, 'M', 'κ'), - (0x1D6DF, 'M', 'φ'), - (0x1D6E0, 'M', 'ρ'), - (0x1D6E1, 'M', 'π'), - (0x1D6E2, 'M', 'α'), - (0x1D6E3, 'M', 'β'), - (0x1D6E4, 'M', 'γ'), - ] - -def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D6E5, 'M', 'δ'), - (0x1D6E6, 'M', 'ε'), - (0x1D6E7, 'M', 'ζ'), - (0x1D6E8, 'M', 'η'), - (0x1D6E9, 'M', 'θ'), - (0x1D6EA, 'M', 'ι'), - (0x1D6EB, 'M', 'κ'), - (0x1D6EC, 'M', 'λ'), - (0x1D6ED, 'M', 'μ'), - (0x1D6EE, 'M', 'ν'), - (0x1D6EF, 'M', 'ξ'), - (0x1D6F0, 'M', 'ο'), - (0x1D6F1, 'M', 'π'), - (0x1D6F2, 'M', 'ρ'), - (0x1D6F3, 'M', 'θ'), - (0x1D6F4, 'M', 'σ'), - (0x1D6F5, 'M', 'τ'), - (0x1D6F6, 'M', 'υ'), - (0x1D6F7, 'M', 'φ'), - (0x1D6F8, 'M', 'χ'), - (0x1D6F9, 'M', 'ψ'), - (0x1D6FA, 'M', 'ω'), - (0x1D6FB, 'M', '∇'), - (0x1D6FC, 'M', 'α'), - (0x1D6FD, 'M', 'β'), - (0x1D6FE, 'M', 'γ'), - (0x1D6FF, 'M', 'δ'), - (0x1D700, 'M', 'ε'), - (0x1D701, 'M', 'ζ'), - (0x1D702, 'M', 'η'), - (0x1D703, 'M', 'θ'), - (0x1D704, 'M', 'ι'), - (0x1D705, 'M', 'κ'), - (0x1D706, 'M', 'λ'), - (0x1D707, 'M', 'μ'), - (0x1D708, 'M', 'ν'), - (0x1D709, 'M', 'ξ'), - (0x1D70A, 'M', 'ο'), - (0x1D70B, 'M', 'π'), - (0x1D70C, 'M', 'ρ'), - (0x1D70D, 'M', 'σ'), - (0x1D70F, 'M', 'τ'), - (0x1D710, 'M', 'υ'), - (0x1D711, 'M', 'φ'), - (0x1D712, 'M', 'χ'), - (0x1D713, 'M', 'ψ'), - (0x1D714, 'M', 'ω'), - (0x1D715, 'M', '∂'), - (0x1D716, 'M', 'ε'), - (0x1D717, 'M', 'θ'), - (0x1D718, 'M', 'κ'), - (0x1D719, 'M', 'φ'), - (0x1D71A, 'M', 'ρ'), - (0x1D71B, 'M', 'π'), - (0x1D71C, 'M', 'α'), - (0x1D71D, 'M', 'β'), - (0x1D71E, 'M', 'γ'), - (0x1D71F, 'M', 'δ'), - (0x1D720, 'M', 
'ε'), - (0x1D721, 'M', 'ζ'), - (0x1D722, 'M', 'η'), - (0x1D723, 'M', 'θ'), - (0x1D724, 'M', 'ι'), - (0x1D725, 'M', 'κ'), - (0x1D726, 'M', 'λ'), - (0x1D727, 'M', 'μ'), - (0x1D728, 'M', 'ν'), - (0x1D729, 'M', 'ξ'), - (0x1D72A, 'M', 'ο'), - (0x1D72B, 'M', 'π'), - (0x1D72C, 'M', 'ρ'), - (0x1D72D, 'M', 'θ'), - (0x1D72E, 'M', 'σ'), - (0x1D72F, 'M', 'τ'), - (0x1D730, 'M', 'υ'), - (0x1D731, 'M', 'φ'), - (0x1D732, 'M', 'χ'), - (0x1D733, 'M', 'ψ'), - (0x1D734, 'M', 'ω'), - (0x1D735, 'M', '∇'), - (0x1D736, 'M', 'α'), - (0x1D737, 'M', 'β'), - (0x1D738, 'M', 'γ'), - (0x1D739, 'M', 'δ'), - (0x1D73A, 'M', 'ε'), - (0x1D73B, 'M', 'ζ'), - (0x1D73C, 'M', 'η'), - (0x1D73D, 'M', 'θ'), - (0x1D73E, 'M', 'ι'), - (0x1D73F, 'M', 'κ'), - (0x1D740, 'M', 'λ'), - (0x1D741, 'M', 'μ'), - (0x1D742, 'M', 'ν'), - (0x1D743, 'M', 'ξ'), - (0x1D744, 'M', 'ο'), - (0x1D745, 'M', 'π'), - (0x1D746, 'M', 'ρ'), - (0x1D747, 'M', 'σ'), - (0x1D749, 'M', 'τ'), - (0x1D74A, 'M', 'υ'), - ] - -def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D74B, 'M', 'φ'), - (0x1D74C, 'M', 'χ'), - (0x1D74D, 'M', 'ψ'), - (0x1D74E, 'M', 'ω'), - (0x1D74F, 'M', '∂'), - (0x1D750, 'M', 'ε'), - (0x1D751, 'M', 'θ'), - (0x1D752, 'M', 'κ'), - (0x1D753, 'M', 'φ'), - (0x1D754, 'M', 'ρ'), - (0x1D755, 'M', 'π'), - (0x1D756, 'M', 'α'), - (0x1D757, 'M', 'β'), - (0x1D758, 'M', 'γ'), - (0x1D759, 'M', 'δ'), - (0x1D75A, 'M', 'ε'), - (0x1D75B, 'M', 'ζ'), - (0x1D75C, 'M', 'η'), - (0x1D75D, 'M', 'θ'), - (0x1D75E, 'M', 'ι'), - (0x1D75F, 'M', 'κ'), - (0x1D760, 'M', 'λ'), - (0x1D761, 'M', 'μ'), - (0x1D762, 'M', 'ν'), - (0x1D763, 'M', 'ξ'), - (0x1D764, 'M', 'ο'), - (0x1D765, 'M', 'π'), - (0x1D766, 'M', 'ρ'), - (0x1D767, 'M', 'θ'), - (0x1D768, 'M', 'σ'), - (0x1D769, 'M', 'τ'), - (0x1D76A, 'M', 'υ'), - (0x1D76B, 'M', 'φ'), - (0x1D76C, 'M', 'χ'), - (0x1D76D, 'M', 'ψ'), - (0x1D76E, 'M', 'ω'), - (0x1D76F, 'M', '∇'), - (0x1D770, 'M', 'α'), - (0x1D771, 'M', 'β'), - (0x1D772, 'M', 'γ'), - (0x1D773, 'M', 'δ'), - (0x1D774, 'M', 'ε'), - (0x1D775, 'M', 'ζ'), - (0x1D776, 'M', 'η'), - (0x1D777, 'M', 'θ'), - (0x1D778, 'M', 'ι'), - (0x1D779, 'M', 'κ'), - (0x1D77A, 'M', 'λ'), - (0x1D77B, 'M', 'μ'), - (0x1D77C, 'M', 'ν'), - (0x1D77D, 'M', 'ξ'), - (0x1D77E, 'M', 'ο'), - (0x1D77F, 'M', 'π'), - (0x1D780, 'M', 'ρ'), - (0x1D781, 'M', 'σ'), - (0x1D783, 'M', 'τ'), - (0x1D784, 'M', 'υ'), - (0x1D785, 'M', 'φ'), - (0x1D786, 'M', 'χ'), - (0x1D787, 'M', 'ψ'), - (0x1D788, 'M', 'ω'), - (0x1D789, 'M', '∂'), - (0x1D78A, 'M', 'ε'), - (0x1D78B, 'M', 'θ'), - (0x1D78C, 'M', 'κ'), - (0x1D78D, 'M', 'φ'), - (0x1D78E, 'M', 'ρ'), - (0x1D78F, 'M', 'π'), - (0x1D790, 'M', 'α'), - (0x1D791, 'M', 'β'), - (0x1D792, 'M', 'γ'), - (0x1D793, 'M', 'δ'), - (0x1D794, 'M', 'ε'), - (0x1D795, 'M', 'ζ'), - (0x1D796, 'M', 'η'), - (0x1D797, 'M', 'θ'), - (0x1D798, 'M', 'ι'), - (0x1D799, 'M', 'κ'), - (0x1D79A, 'M', 'λ'), - (0x1D79B, 'M', 'μ'), - (0x1D79C, 'M', 'ν'), - (0x1D79D, 'M', 'ξ'), - (0x1D79E, 'M', 'ο'), - (0x1D79F, 'M', 'π'), - (0x1D7A0, 'M', 'ρ'), - (0x1D7A1, 'M', 'θ'), - (0x1D7A2, 'M', 'σ'), - (0x1D7A3, 'M', 'τ'), - (0x1D7A4, 'M', 'υ'), - (0x1D7A5, 'M', 'φ'), - (0x1D7A6, 'M', 'χ'), - (0x1D7A7, 'M', 'ψ'), - (0x1D7A8, 'M', 'ω'), - (0x1D7A9, 'M', '∇'), - (0x1D7AA, 'M', 'α'), - (0x1D7AB, 'M', 'β'), - (0x1D7AC, 'M', 'γ'), - (0x1D7AD, 'M', 'δ'), - (0x1D7AE, 'M', 'ε'), - (0x1D7AF, 'M', 'ζ'), - ] - -def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D7B0, 'M', 'η'), - (0x1D7B1, 'M', 'θ'), - (0x1D7B2, 'M', 'ι'), - (0x1D7B3, 'M', 'κ'), - (0x1D7B4, 'M', 'λ'), - (0x1D7B5, 'M', 
'μ'), - (0x1D7B6, 'M', 'ν'), - (0x1D7B7, 'M', 'ξ'), - (0x1D7B8, 'M', 'ο'), - (0x1D7B9, 'M', 'π'), - (0x1D7BA, 'M', 'ρ'), - (0x1D7BB, 'M', 'σ'), - (0x1D7BD, 'M', 'τ'), - (0x1D7BE, 'M', 'υ'), - (0x1D7BF, 'M', 'φ'), - (0x1D7C0, 'M', 'χ'), - (0x1D7C1, 'M', 'ψ'), - (0x1D7C2, 'M', 'ω'), - (0x1D7C3, 'M', '∂'), - (0x1D7C4, 'M', 'ε'), - (0x1D7C5, 'M', 'θ'), - (0x1D7C6, 'M', 'κ'), - (0x1D7C7, 'M', 'φ'), - (0x1D7C8, 'M', 'ρ'), - (0x1D7C9, 'M', 'π'), - (0x1D7CA, 'M', 'ϝ'), - (0x1D7CC, 'X'), - (0x1D7CE, 'M', '0'), - (0x1D7CF, 'M', '1'), - (0x1D7D0, 'M', '2'), - (0x1D7D1, 'M', '3'), - (0x1D7D2, 'M', '4'), - (0x1D7D3, 'M', '5'), - (0x1D7D4, 'M', '6'), - (0x1D7D5, 'M', '7'), - (0x1D7D6, 'M', '8'), - (0x1D7D7, 'M', '9'), - (0x1D7D8, 'M', '0'), - (0x1D7D9, 'M', '1'), - (0x1D7DA, 'M', '2'), - (0x1D7DB, 'M', '3'), - (0x1D7DC, 'M', '4'), - (0x1D7DD, 'M', '5'), - (0x1D7DE, 'M', '6'), - (0x1D7DF, 'M', '7'), - (0x1D7E0, 'M', '8'), - (0x1D7E1, 'M', '9'), - (0x1D7E2, 'M', '0'), - (0x1D7E3, 'M', '1'), - (0x1D7E4, 'M', '2'), - (0x1D7E5, 'M', '3'), - (0x1D7E6, 'M', '4'), - (0x1D7E7, 'M', '5'), - (0x1D7E8, 'M', '6'), - (0x1D7E9, 'M', '7'), - (0x1D7EA, 'M', '8'), - (0x1D7EB, 'M', '9'), - (0x1D7EC, 'M', '0'), - (0x1D7ED, 'M', '1'), - (0x1D7EE, 'M', '2'), - (0x1D7EF, 'M', '3'), - (0x1D7F0, 'M', '4'), - (0x1D7F1, 'M', '5'), - (0x1D7F2, 'M', '6'), - (0x1D7F3, 'M', '7'), - (0x1D7F4, 'M', '8'), - (0x1D7F5, 'M', '9'), - (0x1D7F6, 'M', '0'), - (0x1D7F7, 'M', '1'), - (0x1D7F8, 'M', '2'), - (0x1D7F9, 'M', '3'), - (0x1D7FA, 'M', '4'), - (0x1D7FB, 'M', '5'), - (0x1D7FC, 'M', '6'), - (0x1D7FD, 'M', '7'), - (0x1D7FE, 'M', '8'), - (0x1D7FF, 'M', '9'), - (0x1D800, 'V'), - (0x1DA8C, 'X'), - (0x1DA9B, 'V'), - (0x1DAA0, 'X'), - (0x1DAA1, 'V'), - (0x1DAB0, 'X'), - (0x1DF00, 'V'), - (0x1DF1F, 'X'), - (0x1DF25, 'V'), - (0x1DF2B, 'X'), - (0x1E000, 'V'), - (0x1E007, 'X'), - (0x1E008, 'V'), - (0x1E019, 'X'), - (0x1E01B, 'V'), - (0x1E022, 'X'), - (0x1E023, 'V'), - (0x1E025, 'X'), - (0x1E026, 'V'), - (0x1E02B, 'X'), - (0x1E030, 'M', 'а'), - (0x1E031, 'M', 'б'), - (0x1E032, 'M', 'в'), - ] - -def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E033, 'M', 'г'), - (0x1E034, 'M', 'д'), - (0x1E035, 'M', 'е'), - (0x1E036, 'M', 'ж'), - (0x1E037, 'M', 'з'), - (0x1E038, 'M', 'и'), - (0x1E039, 'M', 'к'), - (0x1E03A, 'M', 'л'), - (0x1E03B, 'M', 'м'), - (0x1E03C, 'M', 'о'), - (0x1E03D, 'M', 'п'), - (0x1E03E, 'M', 'р'), - (0x1E03F, 'M', 'с'), - (0x1E040, 'M', 'т'), - (0x1E041, 'M', 'у'), - (0x1E042, 'M', 'ф'), - (0x1E043, 'M', 'х'), - (0x1E044, 'M', 'ц'), - (0x1E045, 'M', 'ч'), - (0x1E046, 'M', 'ш'), - (0x1E047, 'M', 'ы'), - (0x1E048, 'M', 'э'), - (0x1E049, 'M', 'ю'), - (0x1E04A, 'M', 'ꚉ'), - (0x1E04B, 'M', 'ә'), - (0x1E04C, 'M', 'і'), - (0x1E04D, 'M', 'ј'), - (0x1E04E, 'M', 'ө'), - (0x1E04F, 'M', 'ү'), - (0x1E050, 'M', 'ӏ'), - (0x1E051, 'M', 'а'), - (0x1E052, 'M', 'б'), - (0x1E053, 'M', 'в'), - (0x1E054, 'M', 'г'), - (0x1E055, 'M', 'д'), - (0x1E056, 'M', 'е'), - (0x1E057, 'M', 'ж'), - (0x1E058, 'M', 'з'), - (0x1E059, 'M', 'и'), - (0x1E05A, 'M', 'к'), - (0x1E05B, 'M', 'л'), - (0x1E05C, 'M', 'о'), - (0x1E05D, 'M', 'п'), - (0x1E05E, 'M', 'с'), - (0x1E05F, 'M', 'у'), - (0x1E060, 'M', 'ф'), - (0x1E061, 'M', 'х'), - (0x1E062, 'M', 'ц'), - (0x1E063, 'M', 'ч'), - (0x1E064, 'M', 'ш'), - (0x1E065, 'M', 'ъ'), - (0x1E066, 'M', 'ы'), - (0x1E067, 'M', 'ґ'), - (0x1E068, 'M', 'і'), - (0x1E069, 'M', 'ѕ'), - (0x1E06A, 'M', 'џ'), - (0x1E06B, 'M', 'ҫ'), - (0x1E06C, 'M', 'ꙑ'), - (0x1E06D, 'M', 'ұ'), - (0x1E06E, 'X'), - (0x1E08F, 'V'), - (0x1E090, 
'X'), - (0x1E100, 'V'), - (0x1E12D, 'X'), - (0x1E130, 'V'), - (0x1E13E, 'X'), - (0x1E140, 'V'), - (0x1E14A, 'X'), - (0x1E14E, 'V'), - (0x1E150, 'X'), - (0x1E290, 'V'), - (0x1E2AF, 'X'), - (0x1E2C0, 'V'), - (0x1E2FA, 'X'), - (0x1E2FF, 'V'), - (0x1E300, 'X'), - (0x1E4D0, 'V'), - (0x1E4FA, 'X'), - (0x1E7E0, 'V'), - (0x1E7E7, 'X'), - (0x1E7E8, 'V'), - (0x1E7EC, 'X'), - (0x1E7ED, 'V'), - (0x1E7EF, 'X'), - (0x1E7F0, 'V'), - (0x1E7FF, 'X'), - (0x1E800, 'V'), - (0x1E8C5, 'X'), - (0x1E8C7, 'V'), - (0x1E8D7, 'X'), - (0x1E900, 'M', '𞤢'), - (0x1E901, 'M', '𞤣'), - (0x1E902, 'M', '𞤤'), - (0x1E903, 'M', '𞤥'), - (0x1E904, 'M', '𞤦'), - (0x1E905, 'M', '𞤧'), - (0x1E906, 'M', '𞤨'), - (0x1E907, 'M', '𞤩'), - (0x1E908, 'M', '𞤪'), - (0x1E909, 'M', '𞤫'), - ] - -def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E90A, 'M', '𞤬'), - (0x1E90B, 'M', '𞤭'), - (0x1E90C, 'M', '𞤮'), - (0x1E90D, 'M', '𞤯'), - (0x1E90E, 'M', '𞤰'), - (0x1E90F, 'M', '𞤱'), - (0x1E910, 'M', '𞤲'), - (0x1E911, 'M', '𞤳'), - (0x1E912, 'M', '𞤴'), - (0x1E913, 'M', '𞤵'), - (0x1E914, 'M', '𞤶'), - (0x1E915, 'M', '𞤷'), - (0x1E916, 'M', '𞤸'), - (0x1E917, 'M', '𞤹'), - (0x1E918, 'M', '𞤺'), - (0x1E919, 'M', '𞤻'), - (0x1E91A, 'M', '𞤼'), - (0x1E91B, 'M', '𞤽'), - (0x1E91C, 'M', '𞤾'), - (0x1E91D, 'M', '𞤿'), - (0x1E91E, 'M', '𞥀'), - (0x1E91F, 'M', '𞥁'), - (0x1E920, 'M', '𞥂'), - (0x1E921, 'M', '𞥃'), - (0x1E922, 'V'), - (0x1E94C, 'X'), - (0x1E950, 'V'), - (0x1E95A, 'X'), - (0x1E95E, 'V'), - (0x1E960, 'X'), - (0x1EC71, 'V'), - (0x1ECB5, 'X'), - (0x1ED01, 'V'), - (0x1ED3E, 'X'), - (0x1EE00, 'M', 'ا'), - (0x1EE01, 'M', 'ب'), - (0x1EE02, 'M', 'ج'), - (0x1EE03, 'M', 'د'), - (0x1EE04, 'X'), - (0x1EE05, 'M', 'و'), - (0x1EE06, 'M', 'ز'), - (0x1EE07, 'M', 'ح'), - (0x1EE08, 'M', 'ط'), - (0x1EE09, 'M', 'ي'), - (0x1EE0A, 'M', 'ك'), - (0x1EE0B, 'M', 'ل'), - (0x1EE0C, 'M', 'م'), - (0x1EE0D, 'M', 'ن'), - (0x1EE0E, 'M', 'س'), - (0x1EE0F, 'M', 'ع'), - (0x1EE10, 'M', 'ف'), - (0x1EE11, 'M', 'ص'), - (0x1EE12, 'M', 'ق'), - (0x1EE13, 'M', 'ر'), - (0x1EE14, 'M', 'ش'), - (0x1EE15, 'M', 'ت'), - (0x1EE16, 'M', 'ث'), - (0x1EE17, 'M', 'خ'), - (0x1EE18, 'M', 'ذ'), - (0x1EE19, 'M', 'ض'), - (0x1EE1A, 'M', 'ظ'), - (0x1EE1B, 'M', 'غ'), - (0x1EE1C, 'M', 'ٮ'), - (0x1EE1D, 'M', 'ں'), - (0x1EE1E, 'M', 'ڡ'), - (0x1EE1F, 'M', 'ٯ'), - (0x1EE20, 'X'), - (0x1EE21, 'M', 'ب'), - (0x1EE22, 'M', 'ج'), - (0x1EE23, 'X'), - (0x1EE24, 'M', 'ه'), - (0x1EE25, 'X'), - (0x1EE27, 'M', 'ح'), - (0x1EE28, 'X'), - (0x1EE29, 'M', 'ي'), - (0x1EE2A, 'M', 'ك'), - (0x1EE2B, 'M', 'ل'), - (0x1EE2C, 'M', 'م'), - (0x1EE2D, 'M', 'ن'), - (0x1EE2E, 'M', 'س'), - (0x1EE2F, 'M', 'ع'), - (0x1EE30, 'M', 'ف'), - (0x1EE31, 'M', 'ص'), - (0x1EE32, 'M', 'ق'), - (0x1EE33, 'X'), - (0x1EE34, 'M', 'ش'), - (0x1EE35, 'M', 'ت'), - (0x1EE36, 'M', 'ث'), - (0x1EE37, 'M', 'خ'), - (0x1EE38, 'X'), - (0x1EE39, 'M', 'ض'), - (0x1EE3A, 'X'), - (0x1EE3B, 'M', 'غ'), - (0x1EE3C, 'X'), - (0x1EE42, 'M', 'ج'), - (0x1EE43, 'X'), - (0x1EE47, 'M', 'ح'), - (0x1EE48, 'X'), - (0x1EE49, 'M', 'ي'), - (0x1EE4A, 'X'), - ] - -def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EE4B, 'M', 'ل'), - (0x1EE4C, 'X'), - (0x1EE4D, 'M', 'ن'), - (0x1EE4E, 'M', 'س'), - (0x1EE4F, 'M', 'ع'), - (0x1EE50, 'X'), - (0x1EE51, 'M', 'ص'), - (0x1EE52, 'M', 'ق'), - (0x1EE53, 'X'), - (0x1EE54, 'M', 'ش'), - (0x1EE55, 'X'), - (0x1EE57, 'M', 'خ'), - (0x1EE58, 'X'), - (0x1EE59, 'M', 'ض'), - (0x1EE5A, 'X'), - (0x1EE5B, 'M', 'غ'), - (0x1EE5C, 'X'), - (0x1EE5D, 'M', 'ں'), - (0x1EE5E, 'X'), - (0x1EE5F, 'M', 'ٯ'), - (0x1EE60, 'X'), - (0x1EE61, 
'M', 'ب'), - (0x1EE62, 'M', 'ج'), - (0x1EE63, 'X'), - (0x1EE64, 'M', 'ه'), - (0x1EE65, 'X'), - (0x1EE67, 'M', 'ح'), - (0x1EE68, 'M', 'ط'), - (0x1EE69, 'M', 'ي'), - (0x1EE6A, 'M', 'ك'), - (0x1EE6B, 'X'), - (0x1EE6C, 'M', 'م'), - (0x1EE6D, 'M', 'ن'), - (0x1EE6E, 'M', 'س'), - (0x1EE6F, 'M', 'ع'), - (0x1EE70, 'M', 'ف'), - (0x1EE71, 'M', 'ص'), - (0x1EE72, 'M', 'ق'), - (0x1EE73, 'X'), - (0x1EE74, 'M', 'ش'), - (0x1EE75, 'M', 'ت'), - (0x1EE76, 'M', 'ث'), - (0x1EE77, 'M', 'خ'), - (0x1EE78, 'X'), - (0x1EE79, 'M', 'ض'), - (0x1EE7A, 'M', 'ظ'), - (0x1EE7B, 'M', 'غ'), - (0x1EE7C, 'M', 'ٮ'), - (0x1EE7D, 'X'), - (0x1EE7E, 'M', 'ڡ'), - (0x1EE7F, 'X'), - (0x1EE80, 'M', 'ا'), - (0x1EE81, 'M', 'ب'), - (0x1EE82, 'M', 'ج'), - (0x1EE83, 'M', 'د'), - (0x1EE84, 'M', 'ه'), - (0x1EE85, 'M', 'و'), - (0x1EE86, 'M', 'ز'), - (0x1EE87, 'M', 'ح'), - (0x1EE88, 'M', 'ط'), - (0x1EE89, 'M', 'ي'), - (0x1EE8A, 'X'), - (0x1EE8B, 'M', 'ل'), - (0x1EE8C, 'M', 'م'), - (0x1EE8D, 'M', 'ن'), - (0x1EE8E, 'M', 'س'), - (0x1EE8F, 'M', 'ع'), - (0x1EE90, 'M', 'ف'), - (0x1EE91, 'M', 'ص'), - (0x1EE92, 'M', 'ق'), - (0x1EE93, 'M', 'ر'), - (0x1EE94, 'M', 'ش'), - (0x1EE95, 'M', 'ت'), - (0x1EE96, 'M', 'ث'), - (0x1EE97, 'M', 'خ'), - (0x1EE98, 'M', 'ذ'), - (0x1EE99, 'M', 'ض'), - (0x1EE9A, 'M', 'ظ'), - (0x1EE9B, 'M', 'غ'), - (0x1EE9C, 'X'), - (0x1EEA1, 'M', 'ب'), - (0x1EEA2, 'M', 'ج'), - (0x1EEA3, 'M', 'د'), - (0x1EEA4, 'X'), - (0x1EEA5, 'M', 'و'), - (0x1EEA6, 'M', 'ز'), - (0x1EEA7, 'M', 'ح'), - (0x1EEA8, 'M', 'ط'), - (0x1EEA9, 'M', 'ي'), - (0x1EEAA, 'X'), - (0x1EEAB, 'M', 'ل'), - (0x1EEAC, 'M', 'م'), - (0x1EEAD, 'M', 'ن'), - (0x1EEAE, 'M', 'س'), - (0x1EEAF, 'M', 'ع'), - (0x1EEB0, 'M', 'ف'), - (0x1EEB1, 'M', 'ص'), - (0x1EEB2, 'M', 'ق'), - (0x1EEB3, 'M', 'ر'), - (0x1EEB4, 'M', 'ش'), - ] - -def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EEB5, 'M', 'ت'), - (0x1EEB6, 'M', 'ث'), - (0x1EEB7, 'M', 'خ'), - (0x1EEB8, 'M', 'ذ'), - (0x1EEB9, 'M', 'ض'), - (0x1EEBA, 'M', 'ظ'), - (0x1EEBB, 'M', 'غ'), - (0x1EEBC, 'X'), - (0x1EEF0, 'V'), - (0x1EEF2, 'X'), - (0x1F000, 'V'), - (0x1F02C, 'X'), - (0x1F030, 'V'), - (0x1F094, 'X'), - (0x1F0A0, 'V'), - (0x1F0AF, 'X'), - (0x1F0B1, 'V'), - (0x1F0C0, 'X'), - (0x1F0C1, 'V'), - (0x1F0D0, 'X'), - (0x1F0D1, 'V'), - (0x1F0F6, 'X'), - (0x1F101, '3', '0,'), - (0x1F102, '3', '1,'), - (0x1F103, '3', '2,'), - (0x1F104, '3', '3,'), - (0x1F105, '3', '4,'), - (0x1F106, '3', '5,'), - (0x1F107, '3', '6,'), - (0x1F108, '3', '7,'), - (0x1F109, '3', '8,'), - (0x1F10A, '3', '9,'), - (0x1F10B, 'V'), - (0x1F110, '3', '(a)'), - (0x1F111, '3', '(b)'), - (0x1F112, '3', '(c)'), - (0x1F113, '3', '(d)'), - (0x1F114, '3', '(e)'), - (0x1F115, '3', '(f)'), - (0x1F116, '3', '(g)'), - (0x1F117, '3', '(h)'), - (0x1F118, '3', '(i)'), - (0x1F119, '3', '(j)'), - (0x1F11A, '3', '(k)'), - (0x1F11B, '3', '(l)'), - (0x1F11C, '3', '(m)'), - (0x1F11D, '3', '(n)'), - (0x1F11E, '3', '(o)'), - (0x1F11F, '3', '(p)'), - (0x1F120, '3', '(q)'), - (0x1F121, '3', '(r)'), - (0x1F122, '3', '(s)'), - (0x1F123, '3', '(t)'), - (0x1F124, '3', '(u)'), - (0x1F125, '3', '(v)'), - (0x1F126, '3', '(w)'), - (0x1F127, '3', '(x)'), - (0x1F128, '3', '(y)'), - (0x1F129, '3', '(z)'), - (0x1F12A, 'M', '〔s〕'), - (0x1F12B, 'M', 'c'), - (0x1F12C, 'M', 'r'), - (0x1F12D, 'M', 'cd'), - (0x1F12E, 'M', 'wz'), - (0x1F12F, 'V'), - (0x1F130, 'M', 'a'), - (0x1F131, 'M', 'b'), - (0x1F132, 'M', 'c'), - (0x1F133, 'M', 'd'), - (0x1F134, 'M', 'e'), - (0x1F135, 'M', 'f'), - (0x1F136, 'M', 'g'), - (0x1F137, 'M', 'h'), - (0x1F138, 'M', 'i'), - (0x1F139, 'M', 'j'), - (0x1F13A, 
'M', 'k'), - (0x1F13B, 'M', 'l'), - (0x1F13C, 'M', 'm'), - (0x1F13D, 'M', 'n'), - (0x1F13E, 'M', 'o'), - (0x1F13F, 'M', 'p'), - (0x1F140, 'M', 'q'), - (0x1F141, 'M', 'r'), - (0x1F142, 'M', 's'), - (0x1F143, 'M', 't'), - (0x1F144, 'M', 'u'), - (0x1F145, 'M', 'v'), - (0x1F146, 'M', 'w'), - (0x1F147, 'M', 'x'), - (0x1F148, 'M', 'y'), - (0x1F149, 'M', 'z'), - (0x1F14A, 'M', 'hv'), - (0x1F14B, 'M', 'mv'), - (0x1F14C, 'M', 'sd'), - (0x1F14D, 'M', 'ss'), - (0x1F14E, 'M', 'ppv'), - (0x1F14F, 'M', 'wc'), - (0x1F150, 'V'), - (0x1F16A, 'M', 'mc'), - (0x1F16B, 'M', 'md'), - ] - -def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F16C, 'M', 'mr'), - (0x1F16D, 'V'), - (0x1F190, 'M', 'dj'), - (0x1F191, 'V'), - (0x1F1AE, 'X'), - (0x1F1E6, 'V'), - (0x1F200, 'M', 'ほか'), - (0x1F201, 'M', 'ココ'), - (0x1F202, 'M', 'サ'), - (0x1F203, 'X'), - (0x1F210, 'M', '手'), - (0x1F211, 'M', '字'), - (0x1F212, 'M', '双'), - (0x1F213, 'M', 'デ'), - (0x1F214, 'M', '二'), - (0x1F215, 'M', '多'), - (0x1F216, 'M', '解'), - (0x1F217, 'M', '天'), - (0x1F218, 'M', '交'), - (0x1F219, 'M', '映'), - (0x1F21A, 'M', '無'), - (0x1F21B, 'M', '料'), - (0x1F21C, 'M', '前'), - (0x1F21D, 'M', '後'), - (0x1F21E, 'M', '再'), - (0x1F21F, 'M', '新'), - (0x1F220, 'M', '初'), - (0x1F221, 'M', '終'), - (0x1F222, 'M', '生'), - (0x1F223, 'M', '販'), - (0x1F224, 'M', '声'), - (0x1F225, 'M', '吹'), - (0x1F226, 'M', '演'), - (0x1F227, 'M', '投'), - (0x1F228, 'M', '捕'), - (0x1F229, 'M', '一'), - (0x1F22A, 'M', '三'), - (0x1F22B, 'M', '遊'), - (0x1F22C, 'M', '左'), - (0x1F22D, 'M', '中'), - (0x1F22E, 'M', '右'), - (0x1F22F, 'M', '指'), - (0x1F230, 'M', '走'), - (0x1F231, 'M', '打'), - (0x1F232, 'M', '禁'), - (0x1F233, 'M', '空'), - (0x1F234, 'M', '合'), - (0x1F235, 'M', '満'), - (0x1F236, 'M', '有'), - (0x1F237, 'M', '月'), - (0x1F238, 'M', '申'), - (0x1F239, 'M', '割'), - (0x1F23A, 'M', '営'), - (0x1F23B, 'M', '配'), - (0x1F23C, 'X'), - (0x1F240, 'M', '〔本〕'), - (0x1F241, 'M', '〔三〕'), - (0x1F242, 'M', '〔二〕'), - (0x1F243, 'M', '〔安〕'), - (0x1F244, 'M', '〔点〕'), - (0x1F245, 'M', '〔打〕'), - (0x1F246, 'M', '〔盗〕'), - (0x1F247, 'M', '〔勝〕'), - (0x1F248, 'M', '〔敗〕'), - (0x1F249, 'X'), - (0x1F250, 'M', '得'), - (0x1F251, 'M', '可'), - (0x1F252, 'X'), - (0x1F260, 'V'), - (0x1F266, 'X'), - (0x1F300, 'V'), - (0x1F6D8, 'X'), - (0x1F6DC, 'V'), - (0x1F6ED, 'X'), - (0x1F6F0, 'V'), - (0x1F6FD, 'X'), - (0x1F700, 'V'), - (0x1F777, 'X'), - (0x1F77B, 'V'), - (0x1F7DA, 'X'), - (0x1F7E0, 'V'), - (0x1F7EC, 'X'), - (0x1F7F0, 'V'), - (0x1F7F1, 'X'), - (0x1F800, 'V'), - (0x1F80C, 'X'), - (0x1F810, 'V'), - (0x1F848, 'X'), - (0x1F850, 'V'), - (0x1F85A, 'X'), - (0x1F860, 'V'), - (0x1F888, 'X'), - (0x1F890, 'V'), - (0x1F8AE, 'X'), - (0x1F8B0, 'V'), - (0x1F8B2, 'X'), - (0x1F900, 'V'), - (0x1FA54, 'X'), - (0x1FA60, 'V'), - (0x1FA6E, 'X'), - ] - -def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FA70, 'V'), - (0x1FA7D, 'X'), - (0x1FA80, 'V'), - (0x1FA89, 'X'), - (0x1FA90, 'V'), - (0x1FABE, 'X'), - (0x1FABF, 'V'), - (0x1FAC6, 'X'), - (0x1FACE, 'V'), - (0x1FADC, 'X'), - (0x1FAE0, 'V'), - (0x1FAE9, 'X'), - (0x1FAF0, 'V'), - (0x1FAF9, 'X'), - (0x1FB00, 'V'), - (0x1FB93, 'X'), - (0x1FB94, 'V'), - (0x1FBCB, 'X'), - (0x1FBF0, 'M', '0'), - (0x1FBF1, 'M', '1'), - (0x1FBF2, 'M', '2'), - (0x1FBF3, 'M', '3'), - (0x1FBF4, 'M', '4'), - (0x1FBF5, 'M', '5'), - (0x1FBF6, 'M', '6'), - (0x1FBF7, 'M', '7'), - (0x1FBF8, 'M', '8'), - (0x1FBF9, 'M', '9'), - (0x1FBFA, 'X'), - (0x20000, 'V'), - (0x2A6E0, 'X'), - (0x2A700, 'V'), - (0x2B73A, 'X'), - (0x2B740, 'V'), - (0x2B81E, 'X'), - (0x2B820, 
'V'), - (0x2CEA2, 'X'), - (0x2CEB0, 'V'), - (0x2EBE1, 'X'), - (0x2F800, 'M', '丽'), - (0x2F801, 'M', '丸'), - (0x2F802, 'M', '乁'), - (0x2F803, 'M', '𠄢'), - (0x2F804, 'M', '你'), - (0x2F805, 'M', '侮'), - (0x2F806, 'M', '侻'), - (0x2F807, 'M', '倂'), - (0x2F808, 'M', '偺'), - (0x2F809, 'M', '備'), - (0x2F80A, 'M', '僧'), - (0x2F80B, 'M', '像'), - (0x2F80C, 'M', '㒞'), - (0x2F80D, 'M', '𠘺'), - (0x2F80E, 'M', '免'), - (0x2F80F, 'M', '兔'), - (0x2F810, 'M', '兤'), - (0x2F811, 'M', '具'), - (0x2F812, 'M', '𠔜'), - (0x2F813, 'M', '㒹'), - (0x2F814, 'M', '內'), - (0x2F815, 'M', '再'), - (0x2F816, 'M', '𠕋'), - (0x2F817, 'M', '冗'), - (0x2F818, 'M', '冤'), - (0x2F819, 'M', '仌'), - (0x2F81A, 'M', '冬'), - (0x2F81B, 'M', '况'), - (0x2F81C, 'M', '𩇟'), - (0x2F81D, 'M', '凵'), - (0x2F81E, 'M', '刃'), - (0x2F81F, 'M', '㓟'), - (0x2F820, 'M', '刻'), - (0x2F821, 'M', '剆'), - (0x2F822, 'M', '割'), - (0x2F823, 'M', '剷'), - (0x2F824, 'M', '㔕'), - (0x2F825, 'M', '勇'), - (0x2F826, 'M', '勉'), - (0x2F827, 'M', '勤'), - (0x2F828, 'M', '勺'), - (0x2F829, 'M', '包'), - (0x2F82A, 'M', '匆'), - (0x2F82B, 'M', '北'), - (0x2F82C, 'M', '卉'), - (0x2F82D, 'M', '卑'), - (0x2F82E, 'M', '博'), - (0x2F82F, 'M', '即'), - (0x2F830, 'M', '卽'), - (0x2F831, 'M', '卿'), - (0x2F834, 'M', '𠨬'), - (0x2F835, 'M', '灰'), - (0x2F836, 'M', '及'), - (0x2F837, 'M', '叟'), - (0x2F838, 'M', '𠭣'), - (0x2F839, 'M', '叫'), - (0x2F83A, 'M', '叱'), - (0x2F83B, 'M', '吆'), - (0x2F83C, 'M', '咞'), - (0x2F83D, 'M', '吸'), - (0x2F83E, 'M', '呈'), - ] - -def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F83F, 'M', '周'), - (0x2F840, 'M', '咢'), - (0x2F841, 'M', '哶'), - (0x2F842, 'M', '唐'), - (0x2F843, 'M', '啓'), - (0x2F844, 'M', '啣'), - (0x2F845, 'M', '善'), - (0x2F847, 'M', '喙'), - (0x2F848, 'M', '喫'), - (0x2F849, 'M', '喳'), - (0x2F84A, 'M', '嗂'), - (0x2F84B, 'M', '圖'), - (0x2F84C, 'M', '嘆'), - (0x2F84D, 'M', '圗'), - (0x2F84E, 'M', '噑'), - (0x2F84F, 'M', '噴'), - (0x2F850, 'M', '切'), - (0x2F851, 'M', '壮'), - (0x2F852, 'M', '城'), - (0x2F853, 'M', '埴'), - (0x2F854, 'M', '堍'), - (0x2F855, 'M', '型'), - (0x2F856, 'M', '堲'), - (0x2F857, 'M', '報'), - (0x2F858, 'M', '墬'), - (0x2F859, 'M', '𡓤'), - (0x2F85A, 'M', '売'), - (0x2F85B, 'M', '壷'), - (0x2F85C, 'M', '夆'), - (0x2F85D, 'M', '多'), - (0x2F85E, 'M', '夢'), - (0x2F85F, 'M', '奢'), - (0x2F860, 'M', '𡚨'), - (0x2F861, 'M', '𡛪'), - (0x2F862, 'M', '姬'), - (0x2F863, 'M', '娛'), - (0x2F864, 'M', '娧'), - (0x2F865, 'M', '姘'), - (0x2F866, 'M', '婦'), - (0x2F867, 'M', '㛮'), - (0x2F868, 'X'), - (0x2F869, 'M', '嬈'), - (0x2F86A, 'M', '嬾'), - (0x2F86C, 'M', '𡧈'), - (0x2F86D, 'M', '寃'), - (0x2F86E, 'M', '寘'), - (0x2F86F, 'M', '寧'), - (0x2F870, 'M', '寳'), - (0x2F871, 'M', '𡬘'), - (0x2F872, 'M', '寿'), - (0x2F873, 'M', '将'), - (0x2F874, 'X'), - (0x2F875, 'M', '尢'), - (0x2F876, 'M', '㞁'), - (0x2F877, 'M', '屠'), - (0x2F878, 'M', '屮'), - (0x2F879, 'M', '峀'), - (0x2F87A, 'M', '岍'), - (0x2F87B, 'M', '𡷤'), - (0x2F87C, 'M', '嵃'), - (0x2F87D, 'M', '𡷦'), - (0x2F87E, 'M', '嵮'), - (0x2F87F, 'M', '嵫'), - (0x2F880, 'M', '嵼'), - (0x2F881, 'M', '巡'), - (0x2F882, 'M', '巢'), - (0x2F883, 'M', '㠯'), - (0x2F884, 'M', '巽'), - (0x2F885, 'M', '帨'), - (0x2F886, 'M', '帽'), - (0x2F887, 'M', '幩'), - (0x2F888, 'M', '㡢'), - (0x2F889, 'M', '𢆃'), - (0x2F88A, 'M', '㡼'), - (0x2F88B, 'M', '庰'), - (0x2F88C, 'M', '庳'), - (0x2F88D, 'M', '庶'), - (0x2F88E, 'M', '廊'), - (0x2F88F, 'M', '𪎒'), - (0x2F890, 'M', '廾'), - (0x2F891, 'M', '𢌱'), - (0x2F893, 'M', '舁'), - (0x2F894, 'M', '弢'), - (0x2F896, 'M', '㣇'), - (0x2F897, 'M', '𣊸'), - (0x2F898, 'M', '𦇚'), - (0x2F899, 'M', '形'), - (0x2F89A, 
'M', '彫'), - (0x2F89B, 'M', '㣣'), - (0x2F89C, 'M', '徚'), - (0x2F89D, 'M', '忍'), - (0x2F89E, 'M', '志'), - (0x2F89F, 'M', '忹'), - (0x2F8A0, 'M', '悁'), - (0x2F8A1, 'M', '㤺'), - (0x2F8A2, 'M', '㤜'), - (0x2F8A3, 'M', '悔'), - (0x2F8A4, 'M', '𢛔'), - (0x2F8A5, 'M', '惇'), - (0x2F8A6, 'M', '慈'), - ] - -def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F8A7, 'M', '慌'), - (0x2F8A8, 'M', '慎'), - (0x2F8A9, 'M', '慌'), - (0x2F8AA, 'M', '慺'), - (0x2F8AB, 'M', '憎'), - (0x2F8AC, 'M', '憲'), - (0x2F8AD, 'M', '憤'), - (0x2F8AE, 'M', '憯'), - (0x2F8AF, 'M', '懞'), - (0x2F8B0, 'M', '懲'), - (0x2F8B1, 'M', '懶'), - (0x2F8B2, 'M', '成'), - (0x2F8B3, 'M', '戛'), - (0x2F8B4, 'M', '扝'), - (0x2F8B5, 'M', '抱'), - (0x2F8B6, 'M', '拔'), - (0x2F8B7, 'M', '捐'), - (0x2F8B8, 'M', '𢬌'), - (0x2F8B9, 'M', '挽'), - (0x2F8BA, 'M', '拼'), - (0x2F8BB, 'M', '捨'), - (0x2F8BC, 'M', '掃'), - (0x2F8BD, 'M', '揤'), - (0x2F8BE, 'M', '𢯱'), - (0x2F8BF, 'M', '搢'), - (0x2F8C0, 'M', '揅'), - (0x2F8C1, 'M', '掩'), - (0x2F8C2, 'M', '㨮'), - (0x2F8C3, 'M', '摩'), - (0x2F8C4, 'M', '摾'), - (0x2F8C5, 'M', '撝'), - (0x2F8C6, 'M', '摷'), - (0x2F8C7, 'M', '㩬'), - (0x2F8C8, 'M', '敏'), - (0x2F8C9, 'M', '敬'), - (0x2F8CA, 'M', '𣀊'), - (0x2F8CB, 'M', '旣'), - (0x2F8CC, 'M', '書'), - (0x2F8CD, 'M', '晉'), - (0x2F8CE, 'M', '㬙'), - (0x2F8CF, 'M', '暑'), - (0x2F8D0, 'M', '㬈'), - (0x2F8D1, 'M', '㫤'), - (0x2F8D2, 'M', '冒'), - (0x2F8D3, 'M', '冕'), - (0x2F8D4, 'M', '最'), - (0x2F8D5, 'M', '暜'), - (0x2F8D6, 'M', '肭'), - (0x2F8D7, 'M', '䏙'), - (0x2F8D8, 'M', '朗'), - (0x2F8D9, 'M', '望'), - (0x2F8DA, 'M', '朡'), - (0x2F8DB, 'M', '杞'), - (0x2F8DC, 'M', '杓'), - (0x2F8DD, 'M', '𣏃'), - (0x2F8DE, 'M', '㭉'), - (0x2F8DF, 'M', '柺'), - (0x2F8E0, 'M', '枅'), - (0x2F8E1, 'M', '桒'), - (0x2F8E2, 'M', '梅'), - (0x2F8E3, 'M', '𣑭'), - (0x2F8E4, 'M', '梎'), - (0x2F8E5, 'M', '栟'), - (0x2F8E6, 'M', '椔'), - (0x2F8E7, 'M', '㮝'), - (0x2F8E8, 'M', '楂'), - (0x2F8E9, 'M', '榣'), - (0x2F8EA, 'M', '槪'), - (0x2F8EB, 'M', '檨'), - (0x2F8EC, 'M', '𣚣'), - (0x2F8ED, 'M', '櫛'), - (0x2F8EE, 'M', '㰘'), - (0x2F8EF, 'M', '次'), - (0x2F8F0, 'M', '𣢧'), - (0x2F8F1, 'M', '歔'), - (0x2F8F2, 'M', '㱎'), - (0x2F8F3, 'M', '歲'), - (0x2F8F4, 'M', '殟'), - (0x2F8F5, 'M', '殺'), - (0x2F8F6, 'M', '殻'), - (0x2F8F7, 'M', '𣪍'), - (0x2F8F8, 'M', '𡴋'), - (0x2F8F9, 'M', '𣫺'), - (0x2F8FA, 'M', '汎'), - (0x2F8FB, 'M', '𣲼'), - (0x2F8FC, 'M', '沿'), - (0x2F8FD, 'M', '泍'), - (0x2F8FE, 'M', '汧'), - (0x2F8FF, 'M', '洖'), - (0x2F900, 'M', '派'), - (0x2F901, 'M', '海'), - (0x2F902, 'M', '流'), - (0x2F903, 'M', '浩'), - (0x2F904, 'M', '浸'), - (0x2F905, 'M', '涅'), - (0x2F906, 'M', '𣴞'), - (0x2F907, 'M', '洴'), - (0x2F908, 'M', '港'), - (0x2F909, 'M', '湮'), - (0x2F90A, 'M', '㴳'), - ] - -def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F90B, 'M', '滋'), - (0x2F90C, 'M', '滇'), - (0x2F90D, 'M', '𣻑'), - (0x2F90E, 'M', '淹'), - (0x2F90F, 'M', '潮'), - (0x2F910, 'M', '𣽞'), - (0x2F911, 'M', '𣾎'), - (0x2F912, 'M', '濆'), - (0x2F913, 'M', '瀹'), - (0x2F914, 'M', '瀞'), - (0x2F915, 'M', '瀛'), - (0x2F916, 'M', '㶖'), - (0x2F917, 'M', '灊'), - (0x2F918, 'M', '災'), - (0x2F919, 'M', '灷'), - (0x2F91A, 'M', '炭'), - (0x2F91B, 'M', '𠔥'), - (0x2F91C, 'M', '煅'), - (0x2F91D, 'M', '𤉣'), - (0x2F91E, 'M', '熜'), - (0x2F91F, 'X'), - (0x2F920, 'M', '爨'), - (0x2F921, 'M', '爵'), - (0x2F922, 'M', '牐'), - (0x2F923, 'M', '𤘈'), - (0x2F924, 'M', '犀'), - (0x2F925, 'M', '犕'), - (0x2F926, 'M', '𤜵'), - (0x2F927, 'M', '𤠔'), - (0x2F928, 'M', '獺'), - (0x2F929, 'M', '王'), - (0x2F92A, 'M', '㺬'), - (0x2F92B, 'M', '玥'), - (0x2F92C, 'M', '㺸'), - (0x2F92E, 'M', 
'瑇'), - (0x2F92F, 'M', '瑜'), - (0x2F930, 'M', '瑱'), - (0x2F931, 'M', '璅'), - (0x2F932, 'M', '瓊'), - (0x2F933, 'M', '㼛'), - (0x2F934, 'M', '甤'), - (0x2F935, 'M', '𤰶'), - (0x2F936, 'M', '甾'), - (0x2F937, 'M', '𤲒'), - (0x2F938, 'M', '異'), - (0x2F939, 'M', '𢆟'), - (0x2F93A, 'M', '瘐'), - (0x2F93B, 'M', '𤾡'), - (0x2F93C, 'M', '𤾸'), - (0x2F93D, 'M', '𥁄'), - (0x2F93E, 'M', '㿼'), - (0x2F93F, 'M', '䀈'), - (0x2F940, 'M', '直'), - (0x2F941, 'M', '𥃳'), - (0x2F942, 'M', '𥃲'), - (0x2F943, 'M', '𥄙'), - (0x2F944, 'M', '𥄳'), - (0x2F945, 'M', '眞'), - (0x2F946, 'M', '真'), - (0x2F948, 'M', '睊'), - (0x2F949, 'M', '䀹'), - (0x2F94A, 'M', '瞋'), - (0x2F94B, 'M', '䁆'), - (0x2F94C, 'M', '䂖'), - (0x2F94D, 'M', '𥐝'), - (0x2F94E, 'M', '硎'), - (0x2F94F, 'M', '碌'), - (0x2F950, 'M', '磌'), - (0x2F951, 'M', '䃣'), - (0x2F952, 'M', '𥘦'), - (0x2F953, 'M', '祖'), - (0x2F954, 'M', '𥚚'), - (0x2F955, 'M', '𥛅'), - (0x2F956, 'M', '福'), - (0x2F957, 'M', '秫'), - (0x2F958, 'M', '䄯'), - (0x2F959, 'M', '穀'), - (0x2F95A, 'M', '穊'), - (0x2F95B, 'M', '穏'), - (0x2F95C, 'M', '𥥼'), - (0x2F95D, 'M', '𥪧'), - (0x2F95F, 'X'), - (0x2F960, 'M', '䈂'), - (0x2F961, 'M', '𥮫'), - (0x2F962, 'M', '篆'), - (0x2F963, 'M', '築'), - (0x2F964, 'M', '䈧'), - (0x2F965, 'M', '𥲀'), - (0x2F966, 'M', '糒'), - (0x2F967, 'M', '䊠'), - (0x2F968, 'M', '糨'), - (0x2F969, 'M', '糣'), - (0x2F96A, 'M', '紀'), - (0x2F96B, 'M', '𥾆'), - (0x2F96C, 'M', '絣'), - (0x2F96D, 'M', '䌁'), - (0x2F96E, 'M', '緇'), - (0x2F96F, 'M', '縂'), - (0x2F970, 'M', '繅'), - (0x2F971, 'M', '䌴'), - ] - -def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F972, 'M', '𦈨'), - (0x2F973, 'M', '𦉇'), - (0x2F974, 'M', '䍙'), - (0x2F975, 'M', '𦋙'), - (0x2F976, 'M', '罺'), - (0x2F977, 'M', '𦌾'), - (0x2F978, 'M', '羕'), - (0x2F979, 'M', '翺'), - (0x2F97A, 'M', '者'), - (0x2F97B, 'M', '𦓚'), - (0x2F97C, 'M', '𦔣'), - (0x2F97D, 'M', '聠'), - (0x2F97E, 'M', '𦖨'), - (0x2F97F, 'M', '聰'), - (0x2F980, 'M', '𣍟'), - (0x2F981, 'M', '䏕'), - (0x2F982, 'M', '育'), - (0x2F983, 'M', '脃'), - (0x2F984, 'M', '䐋'), - (0x2F985, 'M', '脾'), - (0x2F986, 'M', '媵'), - (0x2F987, 'M', '𦞧'), - (0x2F988, 'M', '𦞵'), - (0x2F989, 'M', '𣎓'), - (0x2F98A, 'M', '𣎜'), - (0x2F98B, 'M', '舁'), - (0x2F98C, 'M', '舄'), - (0x2F98D, 'M', '辞'), - (0x2F98E, 'M', '䑫'), - (0x2F98F, 'M', '芑'), - (0x2F990, 'M', '芋'), - (0x2F991, 'M', '芝'), - (0x2F992, 'M', '劳'), - (0x2F993, 'M', '花'), - (0x2F994, 'M', '芳'), - (0x2F995, 'M', '芽'), - (0x2F996, 'M', '苦'), - (0x2F997, 'M', '𦬼'), - (0x2F998, 'M', '若'), - (0x2F999, 'M', '茝'), - (0x2F99A, 'M', '荣'), - (0x2F99B, 'M', '莭'), - (0x2F99C, 'M', '茣'), - (0x2F99D, 'M', '莽'), - (0x2F99E, 'M', '菧'), - (0x2F99F, 'M', '著'), - (0x2F9A0, 'M', '荓'), - (0x2F9A1, 'M', '菊'), - (0x2F9A2, 'M', '菌'), - (0x2F9A3, 'M', '菜'), - (0x2F9A4, 'M', '𦰶'), - (0x2F9A5, 'M', '𦵫'), - (0x2F9A6, 'M', '𦳕'), - (0x2F9A7, 'M', '䔫'), - (0x2F9A8, 'M', '蓱'), - (0x2F9A9, 'M', '蓳'), - (0x2F9AA, 'M', '蔖'), - (0x2F9AB, 'M', '𧏊'), - (0x2F9AC, 'M', '蕤'), - (0x2F9AD, 'M', '𦼬'), - (0x2F9AE, 'M', '䕝'), - (0x2F9AF, 'M', '䕡'), - (0x2F9B0, 'M', '𦾱'), - (0x2F9B1, 'M', '𧃒'), - (0x2F9B2, 'M', '䕫'), - (0x2F9B3, 'M', '虐'), - (0x2F9B4, 'M', '虜'), - (0x2F9B5, 'M', '虧'), - (0x2F9B6, 'M', '虩'), - (0x2F9B7, 'M', '蚩'), - (0x2F9B8, 'M', '蚈'), - (0x2F9B9, 'M', '蜎'), - (0x2F9BA, 'M', '蛢'), - (0x2F9BB, 'M', '蝹'), - (0x2F9BC, 'M', '蜨'), - (0x2F9BD, 'M', '蝫'), - (0x2F9BE, 'M', '螆'), - (0x2F9BF, 'X'), - (0x2F9C0, 'M', '蟡'), - (0x2F9C1, 'M', '蠁'), - (0x2F9C2, 'M', '䗹'), - (0x2F9C3, 'M', '衠'), - (0x2F9C4, 'M', '衣'), - (0x2F9C5, 'M', '𧙧'), - (0x2F9C6, 'M', '裗'), - (0x2F9C7, 'M', 
'裞'), - (0x2F9C8, 'M', '䘵'), - (0x2F9C9, 'M', '裺'), - (0x2F9CA, 'M', '㒻'), - (0x2F9CB, 'M', '𧢮'), - (0x2F9CC, 'M', '𧥦'), - (0x2F9CD, 'M', '䚾'), - (0x2F9CE, 'M', '䛇'), - (0x2F9CF, 'M', '誠'), - (0x2F9D0, 'M', '諭'), - (0x2F9D1, 'M', '變'), - (0x2F9D2, 'M', '豕'), - (0x2F9D3, 'M', '𧲨'), - (0x2F9D4, 'M', '貫'), - (0x2F9D5, 'M', '賁'), - ] - -def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F9D6, 'M', '贛'), - (0x2F9D7, 'M', '起'), - (0x2F9D8, 'M', '𧼯'), - (0x2F9D9, 'M', '𠠄'), - (0x2F9DA, 'M', '跋'), - (0x2F9DB, 'M', '趼'), - (0x2F9DC, 'M', '跰'), - (0x2F9DD, 'M', '𠣞'), - (0x2F9DE, 'M', '軔'), - (0x2F9DF, 'M', '輸'), - (0x2F9E0, 'M', '𨗒'), - (0x2F9E1, 'M', '𨗭'), - (0x2F9E2, 'M', '邔'), - (0x2F9E3, 'M', '郱'), - (0x2F9E4, 'M', '鄑'), - (0x2F9E5, 'M', '𨜮'), - (0x2F9E6, 'M', '鄛'), - (0x2F9E7, 'M', '鈸'), - (0x2F9E8, 'M', '鋗'), - (0x2F9E9, 'M', '鋘'), - (0x2F9EA, 'M', '鉼'), - (0x2F9EB, 'M', '鏹'), - (0x2F9EC, 'M', '鐕'), - (0x2F9ED, 'M', '𨯺'), - (0x2F9EE, 'M', '開'), - (0x2F9EF, 'M', '䦕'), - (0x2F9F0, 'M', '閷'), - (0x2F9F1, 'M', '𨵷'), - (0x2F9F2, 'M', '䧦'), - (0x2F9F3, 'M', '雃'), - (0x2F9F4, 'M', '嶲'), - (0x2F9F5, 'M', '霣'), - (0x2F9F6, 'M', '𩅅'), - (0x2F9F7, 'M', '𩈚'), - (0x2F9F8, 'M', '䩮'), - (0x2F9F9, 'M', '䩶'), - (0x2F9FA, 'M', '韠'), - (0x2F9FB, 'M', '𩐊'), - (0x2F9FC, 'M', '䪲'), - (0x2F9FD, 'M', '𩒖'), - (0x2F9FE, 'M', '頋'), - (0x2FA00, 'M', '頩'), - (0x2FA01, 'M', '𩖶'), - (0x2FA02, 'M', '飢'), - (0x2FA03, 'M', '䬳'), - (0x2FA04, 'M', '餩'), - (0x2FA05, 'M', '馧'), - (0x2FA06, 'M', '駂'), - (0x2FA07, 'M', '駾'), - (0x2FA08, 'M', '䯎'), - (0x2FA09, 'M', '𩬰'), - (0x2FA0A, 'M', '鬒'), - (0x2FA0B, 'M', '鱀'), - (0x2FA0C, 'M', '鳽'), - (0x2FA0D, 'M', '䳎'), - (0x2FA0E, 'M', '䳭'), - (0x2FA0F, 'M', '鵧'), - (0x2FA10, 'M', '𪃎'), - (0x2FA11, 'M', '䳸'), - (0x2FA12, 'M', '𪄅'), - (0x2FA13, 'M', '𪈎'), - (0x2FA14, 'M', '𪊑'), - (0x2FA15, 'M', '麻'), - (0x2FA16, 'M', '䵖'), - (0x2FA17, 'M', '黹'), - (0x2FA18, 'M', '黾'), - (0x2FA19, 'M', '鼅'), - (0x2FA1A, 'M', '鼏'), - (0x2FA1B, 'M', '鼖'), - (0x2FA1C, 'M', '鼻'), - (0x2FA1D, 'M', '𪘀'), - (0x2FA1E, 'X'), - (0x30000, 'V'), - (0x3134B, 'X'), - (0x31350, 'V'), - (0x323B0, 'X'), - (0xE0100, 'I'), - (0xE01F0, 'X'), - ] - -uts46data = tuple( - _seg_0() - + _seg_1() - + _seg_2() - + _seg_3() - + _seg_4() - + _seg_5() - + _seg_6() - + _seg_7() - + _seg_8() - + _seg_9() - + _seg_10() - + _seg_11() - + _seg_12() - + _seg_13() - + _seg_14() - + _seg_15() - + _seg_16() - + _seg_17() - + _seg_18() - + _seg_19() - + _seg_20() - + _seg_21() - + _seg_22() - + _seg_23() - + _seg_24() - + _seg_25() - + _seg_26() - + _seg_27() - + _seg_28() - + _seg_29() - + _seg_30() - + _seg_31() - + _seg_32() - + _seg_33() - + _seg_34() - + _seg_35() - + _seg_36() - + _seg_37() - + _seg_38() - + _seg_39() - + _seg_40() - + _seg_41() - + _seg_42() - + _seg_43() - + _seg_44() - + _seg_45() - + _seg_46() - + _seg_47() - + _seg_48() - + _seg_49() - + _seg_50() - + _seg_51() - + _seg_52() - + _seg_53() - + _seg_54() - + _seg_55() - + _seg_56() - + _seg_57() - + _seg_58() - + _seg_59() - + _seg_60() - + _seg_61() - + _seg_62() - + _seg_63() - + _seg_64() - + _seg_65() - + _seg_66() - + _seg_67() - + _seg_68() - + _seg_69() - + _seg_70() - + _seg_71() - + _seg_72() - + _seg_73() - + _seg_74() - + _seg_75() - + _seg_76() - + _seg_77() - + _seg_78() - + _seg_79() - + _seg_80() - + _seg_81() -) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] 
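
The `uts46data` tuple assembled at the end of the file above is a sorted sequence of range-start entries of the form `(codepoint, status)` or `(codepoint, status, mapping)`; the `_seg_*` helpers simply split the full table into chunks that are concatenated at import time. As a minimal sketch of how such a range-start table is typically consumed, the snippet below queries a tiny hypothetical table with a binary search. The `_table`, `_starts`, and `lookup` names are invented for this illustration and are not the idna library's actual lookup API.

```python
# Sketch: binary-search lookup over a range-start table shaped like uts46data.
# The miniature _table below is an assumption for illustration only.
from bisect import bisect_right

_table = (
    (0x1D7CE, 'M', '0'),   # mapped: MATHEMATICAL BOLD DIGIT ZERO -> '0'
    (0x1D7CF, 'M', '1'),   # mapped
    (0x1D800, 'V'),        # valid as-is, no mapping element
    (0x1DA8C, 'X'),        # disallowed
)
_starts = [entry[0] for entry in _table]

def lookup(codepoint: int):
    """Return (status, mapping) for the range containing codepoint.

    Assumes codepoint >= _starts[0]; each entry governs all codepoints
    from its start up to (but not including) the next entry's start.
    """
    idx = bisect_right(_starts, codepoint) - 1
    entry = _table[idx]
    mapping = entry[2] if len(entry) > 2 else None
    return entry[1], mapping

print(lookup(0x1D7CE))  # ('M', '0')
print(lookup(0x1DA00))  # ('V', None) -- covered by the 0x1D800 range
print(lookup(0x1DA8C))  # ('X', None) -- start of a disallowed range
```
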
diff --git a/spaces/BilalSardar/AutoML-Model-Training/app.py b/spaces/BilalSardar/AutoML-Model-Training/app.py deleted file mode 100644 index 3b9e0fa841e2d4ae94d933d3ba3d9ca7a4c705de..0000000000000000000000000000000000000000 --- a/spaces/BilalSardar/AutoML-Model-Training/app.py +++ /dev/null @@ -1,45 +0,0 @@ -from operator import index -import streamlit as st -import plotly.express as px -from pycaret.regression import setup, compare_models, pull, save_model, load_model -import pandas_profiling -import pandas as pd -from streamlit_pandas_profiling import st_profile_report -import os - -if os.path.exists('./dataset.csv'): - df = pd.read_csv('dataset.csv', index_col=None) - -with st.sidebar: - st.image("https://www.onepointltd.com/wp-content/uploads/2020/03/inno2.png") - st.title("AutoBaliML") - choice = st.radio("Navigation", ["Upload","Profiling","Modelling", "Download"]) - st.info("This project application helps you build and explore your data.") - -if choice == "Upload": - st.title("Upload Your Dataset") - file = st.file_uploader("Upload Your Dataset") - if file: - df = pd.read_csv(file, index_col=None) - df.to_csv('dataset.csv', index=None) - st.dataframe(df) - -if choice == "Profiling": - st.title("Exploratory Data Analysis") - profile_df = df.profile_report() - st_profile_report(profile_df) - -if choice == "Modelling": - chosen_target = st.selectbox('Choose the Target Column', df.columns) - if st.button('Run Modelling'): - setup(df, target=chosen_target, silent=True) - setup_df = pull() - st.dataframe(setup_df) - best_model = compare_models() - compare_df = pull() - st.dataframe(compare_df) - save_model(best_model, 'best_model') - -if choice == "Download": - with open('best_model.pkl', 'rb') as f: - st.download_button('Download Model', f, file_name="best_model.pkl") \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_system_tag.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_system_tag.h deleted file mode 100644 index 27640b5e0dd83881cbd16d19229c409307bc7da8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_system_tag.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include - -namespace thrust -{ - -struct any_system_tag - : thrust::execution_policy -{ - // allow any_system_tag to convert to any type at all - // XXX make this safer using enable_if> upon c++11 - template operator T () const {return T();} -}; - -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/customization.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/customization.h deleted file mode 100644 index eb52c2cf02ce26f2083e8f39f436deb1a884a0dd..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/customization.h +++ /dev/null @@ -1,128 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -// TODO: Move into system::cuda - -#pragma once - -#include -#include - -#if THRUST_CPP_DIALECT >= 2014 - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -namespace system { namespace cuda { namespace detail -{ - -using default_async_host_resource = - thrust::mr::synchronized_pool_resource< - thrust::host_memory_resource - >; - -template -auto get_async_host_allocator( - thrust::detail::execution_policy_base& -) -THRUST_RETURNS( - thrust::mr::stateless_resource_allocator< - thrust::detail::uint8_t, default_async_host_resource - >{} -) - -/////////////////////////////////////////////////////////////////////////////// - -using default_async_device_resource = - thrust::mr::disjoint_synchronized_pool_resource< - thrust::system::cuda::memory_resource - , thrust::mr::new_delete_resource - >; - -template -auto get_async_device_allocator( - thrust::detail::execution_policy_base& -) -THRUST_RETURNS( - thrust::per_device_allocator< - thrust::detail::uint8_t, default_async_device_resource, par_t - >{} -) - -template class BaseSystem> -auto get_async_device_allocator( - thrust::detail::execute_with_allocator& exec -) -THRUST_RETURNS(exec.get_allocator()) - -template class BaseSystem> -auto get_async_device_allocator( - thrust::detail::execute_with_allocator_and_dependencies< - Allocator, BaseSystem - >& exec -) -THRUST_RETURNS(exec.get_allocator()) - -/////////////////////////////////////////////////////////////////////////////// - -using default_async_universal_host_pinned_resource = - thrust::mr::synchronized_pool_resource< - thrust::system::cuda::universal_host_pinned_memory_resource - >; - -template -auto get_async_universal_host_pinned_allocator( - thrust::detail::execution_policy_base& -) -THRUST_RETURNS( - thrust::mr::stateless_resource_allocator< - thrust::detail::uint8_t, default_async_universal_host_pinned_resource - >{} -) - -}}} // namespace system::cuda::detail - -} // end namespace thrust - -#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC - -#endif - diff --git a/spaces/CVPR/VizWiz-CLIP-VQA/model/vqa_model.py b/spaces/CVPR/VizWiz-CLIP-VQA/model/vqa_model.py deleted file mode 100644 index c684a5daaa4348415d11b1ad41e968ef4f717467..0000000000000000000000000000000000000000 --- a/spaces/CVPR/VizWiz-CLIP-VQA/model/vqa_model.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch - -class HeadVQA(torch.nn.Module): - def __init__(self, train_config): - super().__init__() - - embedding_size = {'RN50': 1024, - 'RN101': 512, - 'RN50x4': 640, - 'RN50x16': 768, - 'RN50x64': 1024, - 'ViT-B/32': 512, - 'ViT-B/16': 512, - 'ViT-L/14': 768, - 'ViT-L/14@336px': 768} - - n_aux_classes = len(set(train_config.aux_mapping.values())) - - self.ln1 = torch.nn.LayerNorm(embedding_size[train_config.model]*2) - self.dp1 = torch.nn.Dropout(0.5) - self.fc1 = torch.nn.Linear(embedding_size[train_config.model] * 2, 512) - - self.ln2 = torch.nn.LayerNorm(512) - self.dp2 = torch.nn.Dropout(0.5) - self.fc2 = torch.nn.Linear(512, train_config.n_classes) - - self.fc_aux = torch.nn.Linear(512, n_aux_classes) - self.fc_gate = torch.nn.Linear(n_aux_classes, train_config.n_classes) - self.act_gate = torch.nn.Sigmoid() - - - def forward(self, img_features, question_features): - xc = torch.cat((img_features, question_features), dim=-1) - - x = self.ln1(xc) - x = self.dp1(x) - x = self.fc1(x) - - 
aux = self.fc_aux(x) - - gate = self.fc_gate(aux) - gate = self.act_gate(gate) - - x = self.ln2(x) - x = self.dp2(x) - vqa = self.fc2(x) - - output = vqa * gate - - return output, aux - - -class NetVQA(torch.nn.Module): - def __init__(self, train_config): - super().__init__() - - self.heads = torch.nn.ModuleList() - - if isinstance(train_config.folds, list): - self.num_heads = len(train_config.folds) - else: - self.num_heads = train_config.folds - - for i in range(self.num_heads): - self.heads.append(HeadVQA(train_config)) - - - def forward(self, img_features, question_features): - - output = [] - output_aux = [] - - for head in self.heads: - - logits, logits_aux = head(img_features, question_features) - - probs = logits.softmax(-1) - probs_aux = logits_aux.softmax(-1) - - output.append(probs) - output_aux.append(probs_aux) - - output = torch.stack(output, dim=-1).mean(-1) - output_aux = torch.stack(output_aux, dim=-1).mean(-1) - - return output, output_aux - -def merge_vqa(train_config): - - # Initialize model - model = NetVQA(train_config) - - - for fold in train_config.folds: - - print("load weights from fold {} into head {}".format(fold, fold)) - - checkpoint_path = "{}/{}/fold_{}".format(train_config.model_path, train_config.model, fold) - - if train_config.crossvalidation: - # load best checkpoint - model_state_dict = torch.load('{}/weights_best.pth'.format(checkpoint_path)) - else: - # load checkpoint on train end - model_state_dict = torch.load('{}/weights_end.pth'.format(checkpoint_path)) - - model.heads[fold].load_state_dict(model_state_dict, strict=True) - - checkpoint_path = "{}/{}/weights_merged.pth".format(train_config.model_path, train_config.model) - - print("Saving weights of merged model:", checkpoint_path) - - torch.save(model.state_dict(), checkpoint_path) - - return model - - - - - - - - \ No newline at end of file diff --git a/spaces/CVPR/drawings-to-human/frontend/src/app.css b/spaces/CVPR/drawings-to-human/frontend/src/app.css deleted file mode 100644 index f39386e47449716dc5a0c936162a38263e61b2b7..0000000000000000000000000000000000000000 --- a/spaces/CVPR/drawings-to-human/frontend/src/app.css +++ /dev/null @@ -1,10 +0,0 @@ -@import url('https://fonts.googleapis.com/css2?family=Open+Sans:wght@100;200;300;400;500;600;700;800&display=swap'); -@tailwind base; -@tailwind components; -@tailwind utilities; - -@layer base { - html { - font-family: 'Open Sans', sans-serif; - } -} diff --git a/spaces/CVPR/lama-example/fetch_data/places_standard_test_val_sample.sh b/spaces/CVPR/lama-example/fetch_data/places_standard_test_val_sample.sh deleted file mode 100644 index 7b581f457e32e339d7a480845de27d37d0171322..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/fetch_data/places_standard_test_val_sample.sh +++ /dev/null @@ -1,22 +0,0 @@ -mkdir -p places_standard_dataset/val_hires/ -mkdir -p places_standard_dataset/visual_test_hires/ - - -# randomly sample images for test and vis -OUT=$(python3 fetch_data/sampler.py) -echo ${OUT} - -FILELIST=$(cat places_standard_dataset/original/test_random_files.txt) - -for i in $FILELIST -do - $(cp ${i} places_standard_dataset/val_hires/) -done - -FILELIST=$(cat places_standard_dataset/original/val_random_files.txt) - -for i in $FILELIST -do - $(cp ${i} places_standard_dataset/visual_test_hires/) -done - diff --git a/spaces/Charliee/BingAi/README.md b/spaces/Charliee/BingAi/README.md deleted file mode 100644 index 60217905a737d93ca7432e23ea25811fd029f0c4..0000000000000000000000000000000000000000 --- 
a/spaces/Charliee/BingAi/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: BingAi -emoji: 🏃 -colorFrom: indigo -colorTo: green -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/garbage/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/garbage/__init__.py deleted file mode 100644 index c0c75d8c4055c555419af21fece78e12e3da94c6..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/garbage/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -from pathlib import Path -from typing import List - -from PIL.Image import Image as IMG -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.utils import save_gif - -img_dir = Path(__file__).parent / "images" - - -def garbage(images: List[BuildImage], texts, args): - img = images[0].convert("RGBA").square().resize((79, 79)) - # fmt: off - locs = ( - [] + [(39, 40)] * 3 + [(39, 30)] * 2 + [(39, 32)] * 10 - + [(39, 30), (39, 27), (39, 32), (37, 49), (37, 64), - (37, 67), (37, 67), (39, 69), (37, 70), (37, 70)] - ) - # fmt: on - frames: List[IMG] = [] - for i in range(25): - frame = BuildImage.open(img_dir / f"{i}.png") - frame.paste(img, locs[i], below=True) - frames.append(frame.image) - return save_gif(frames, 0.04) - - -add_meme("garbage", garbage, min_images=1, max_images=1, keywords=["垃圾", "垃圾桶"]) diff --git a/spaces/DCandE/rvc-models/infer_pack/models_onnx.py b/spaces/DCandE/rvc-models/infer_pack/models_onnx.py deleted file mode 100644 index 3cdae2f7f8591a1e43b1d8520baa37b7e9744d72..0000000000000000000000000000000000000000 --- a/spaces/DCandE/rvc-models/infer_pack/models_onnx.py +++ /dev/null @@ -1,849 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - 
stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class 
Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * 
( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == 
"1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - 
self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) 
for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/macUtils.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/macUtils.py deleted file mode 100644 index 468a75ad6d2da59bf00bbb07063ba4819aff64dd..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/macUtils.py +++ /dev/null @@ -1,54 +0,0 @@ -"""ttLib.macUtils.py -- Various Mac-specific stuff.""" -from io import BytesIO -from fontTools.misc.macRes import ResourceReader, ResourceError - - -def getSFNTResIndices(path): - """Determine whether a file has a 'sfnt' resource fork or not.""" - try: - reader = ResourceReader(path) - indices = reader.getIndices("sfnt") - 
reader.close() - return indices - except ResourceError: - return [] - - -def openTTFonts(path): - """Given a pathname, return a list of TTFont objects. In the case - of a flat TTF/OTF file, the list will contain just one font object; - but in the case of a Mac font suitcase it will contain as many - font objects as there are sfnt resources in the file. - """ - from fontTools import ttLib - - fonts = [] - sfnts = getSFNTResIndices(path) - if not sfnts: - fonts.append(ttLib.TTFont(path)) - else: - for index in sfnts: - fonts.append(ttLib.TTFont(path, index)) - if not fonts: - raise ttLib.TTLibError("no fonts found in file '%s'" % path) - return fonts - - -class SFNTResourceReader(BytesIO): - - """Simple read-only file wrapper for 'sfnt' resources.""" - - def __init__(self, path, res_name_or_index): - from fontTools import ttLib - - reader = ResourceReader(path) - if isinstance(res_name_or_index, str): - rsrc = reader.getNamedResource("sfnt", res_name_or_index) - else: - rsrc = reader.getIndResource("sfnt", res_name_or_index) - if rsrc is None: - raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index) - reader.close() - self.rsrc = rsrc - super(SFNTResourceReader, self).__init__(rsrc.data) - self.name = path diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/share.html b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/share.html deleted file mode 100644 index 2aa4ca9721b9f4f299b97b324cdf0eb9fc1111e0..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/share.html +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/spaces/DaleChen/AutoGPT/tests/unit/test_chat.py b/spaces/DaleChen/AutoGPT/tests/unit/test_chat.py deleted file mode 100644 index 774f4103762c28d5a02e89c14b224fae0bc0756a..0000000000000000000000000000000000000000 --- a/spaces/DaleChen/AutoGPT/tests/unit/test_chat.py +++ /dev/null @@ -1,86 +0,0 @@ -# Generated by CodiumAI -import time -import unittest -from unittest.mock import patch - -from autogpt.chat import create_chat_message, generate_context - - -class TestChat(unittest.TestCase): - # Tests that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content. - def test_happy_path_role_content(self): - result = create_chat_message("system", "Hello, world!") - self.assertEqual(result, {"role": "system", "content": "Hello, world!"}) - - # Tests that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content. - def test_empty_role_content(self): - result = create_chat_message("", "") - self.assertEqual(result, {"role": "", "content": ""}) - - # Tests the behavior of the generate_context function when all input parameters are empty. 
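    # time.strftime is patched so the system message built inside generate_context is
    # deterministic and matches the hard-coded expected_result below.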
- @patch("time.strftime") - def test_generate_context_empty_inputs(self, mock_strftime): - # Mock the time.strftime function to return a fixed value - mock_strftime.return_value = "Sat Apr 15 00:00:00 2023" - # Arrange - prompt = "" - relevant_memory = "" - full_message_history = [] - model = "gpt-3.5-turbo-0301" - - # Act - result = generate_context(prompt, relevant_memory, full_message_history, model) - - # Assert - expected_result = ( - -1, - 47, - 3, - [ - {"role": "system", "content": ""}, - { - "role": "system", - "content": f"The current time and date is {time.strftime('%c')}", - }, - { - "role": "system", - "content": f"This reminds you of these events from your past:\n\n\n", - }, - ], - ) - self.assertEqual(result, expected_result) - - # Tests that the function successfully generates a current_context given valid inputs. - def test_generate_context_valid_inputs(self): - # Given - prompt = "What is your favorite color?" - relevant_memory = "You once painted your room blue." - full_message_history = [ - create_chat_message("user", "Hi there!"), - create_chat_message("assistant", "Hello! How can I assist you today?"), - create_chat_message("user", "Can you tell me a joke?"), - create_chat_message( - "assistant", - "Why did the tomato turn red? Because it saw the salad dressing!", - ), - create_chat_message("user", "Haha, that's funny."), - ] - model = "gpt-3.5-turbo-0301" - - # When - result = generate_context(prompt, relevant_memory, full_message_history, model) - - # Then - self.assertIsInstance(result[0], int) - self.assertIsInstance(result[1], int) - self.assertIsInstance(result[2], int) - self.assertIsInstance(result[3], list) - self.assertGreaterEqual(result[0], 0) - self.assertGreaterEqual(result[1], 0) - self.assertGreaterEqual(result[2], 0) - self.assertGreaterEqual( - len(result[3]), 3 - ) # current_context should have at least 3 messages - self.assertLessEqual( - result[1], 2048 - ) # token limit for GPT-3.5-turbo-0301 is 2048 tokens diff --git a/spaces/DemoLou/moe-tts/utils.py b/spaces/DemoLou/moe-tts/utils.py deleted file mode 100644 index 4cb5b43d0ca2bae496e7871b2094f2ffb26ab642..0000000000000000000000000000000000000000 --- a/spaces/DemoLou/moe-tts/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.ERROR) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - 
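    # Render the spectrogram with the headless "Agg" matplotlib backend and return the
    # drawn canvas as an (H, W, 3) uint8 array, e.g. for TensorBoard image logging.
    # Hypothetical usage sketch (names assumed, not part of this file):
    #   img = plot_spectrogram_to_numpy(mel[0].cpu().numpy())
    #   writer.add_image("mel", img, global_step, dataformats="HWC")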
global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - 
if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Eemansleepdeprived/Study_For_Me_AI/app.py b/spaces/Eemansleepdeprived/Study_For_Me_AI/app.py deleted file mode 100644 index 671badfe4d6b42ba12b3fdaa5428a867bb608875..0000000000000000000000000000000000000000 --- a/spaces/Eemansleepdeprived/Study_For_Me_AI/app.py +++ /dev/null @@ -1,44 +0,0 @@ -import streamlit as st -import os - - -# st.title('Ask Me Anything 📚') -st.markdown("

<h1 style='text-align: center;'>Ask Me Anything 🎓</h1>

", unsafe_allow_html=True) -st.markdown('') -st.markdown('') - -st.session_state['new']=True -# if st.session_state.new==True: -# os.system('!pip install torch==1.10.2+cu113 torchvision==0.11.3+cu113 torchaudio===0.10.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html') -# os.system('!pip install transformers') -# st.session_state.new=False - -from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline - -form = st.form(key='my_form') - -# creating the q/a pipeline -nlp = pipeline('question-answering', model='deepset/roberta-base-squad2', tokenizer='deepset/roberta-base-squad2') - -text = form.text_area('Gimme Stuff To Study 📚') - -submit_button = form.form_submit_button(label='Study This') - -st.markdown('---') -ques=st.text_input('Ask Me Anything From The Information You Have Given') - -#forming a question directory -ques_dict = { - 'question':ques, - 'context':text - } - -butt = st.button('Ask 🤷🏻') - -if butt==True: - results = nlp(ques_dict) - st.markdown('---') - st.subheader('Here Is Your Answer') - st.success(results['answer']) - st.balloons() - diff --git a/spaces/Egrt/LicenseGAN/utils/utils.py b/spaces/Egrt/LicenseGAN/utils/utils.py deleted file mode 100644 index 69adce03409348f84080abffa5bd5c82b67069ed..0000000000000000000000000000000000000000 --- a/spaces/Egrt/LicenseGAN/utils/utils.py +++ /dev/null @@ -1,162 +0,0 @@ -import itertools -import numpy as np -import matplotlib.pyplot as plt -import torch -from torch.nn import functional as F -# import cv2 -import distutils.util - -def show_result(num_epoch, G_net, imgs_lr, imgs_hr): - with torch.no_grad(): - test_images = G_net(imgs_lr) - - fig, ax = plt.subplots(1, 2) - - for j in itertools.product(range(2)): - ax[j].get_xaxis().set_visible(False) - ax[j].get_yaxis().set_visible(False) - - ax[0].cla() - ax[0].imshow(np.transpose(test_images.cpu().numpy()[0] * 0.5 + 0.5, [1,2,0])) - - ax[1].cla() - ax[1].imshow(np.transpose(imgs_hr.cpu().numpy()[0] * 0.5 + 0.5, [1,2,0])) - - label = 'Epoch {0}'.format(num_epoch) - fig.text(0.5, 0.04, label, ha='center') - plt.savefig("results/train_out/epoch_" + str(num_epoch) + "_results.png") - plt.close('all') #避免内存泄漏 - -#---------------------------------------------------------# -# 将图像转换成RGB图像,防止灰度图在预测时报错。 -# 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB -#---------------------------------------------------------# -def cvtColor(image): - if len(np.shape(image)) == 3 and np.shape(image)[2] == 3: - return image - else: - image = image.convert('RGB') - return image - -def preprocess_input(image, mean, std): - image = (image/255 - mean)/std - return image - -def get_lr(optimizer): - for param_group in optimizer.param_groups: - return param_group['lr'] - -def print_arguments(args): - print("----------- Configuration Arguments -----------") - for arg, value in sorted(vars(args).items()): - print("%s: %s" % (arg, value)) - print("------------------------------------------------") - - -def add_arguments(argname, type, default, help, argparser, **kwargs): - type = distutils.util.strtobool if type == bool else type - argparser.add_argument("--" + argname, - default=default, - type=type, - help=help + ' 默认: %(default)s.', - **kwargs) - -def filter2D(img, kernel): - """PyTorch version of cv2.filter2D - - Args: - img (Tensor): (b, c, h, w) - kernel (Tensor): (b, k, k) - """ - k = kernel.size(-1) - b, c, h, w = img.size() - if k % 2 == 1: - img = F.pad(img, (k // 2, k // 2, k // 2, k // 2), mode='reflect') - else: - raise ValueError('Wrong kernel size') - - ph, pw = img.size()[-2:] - - 
if kernel.size(0) == 1: - # apply the same kernel to all batch images - img = img.view(b * c, 1, ph, pw) - kernel = kernel.view(1, 1, k, k) - return F.conv2d(img, kernel, padding=0).view(b, c, h, w) - else: - img = img.view(1, b * c, ph, pw) - kernel = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k) - return F.conv2d(img, kernel, groups=b * c).view(b, c, h, w) - - -def usm_sharp(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. - - Input image: I; Blurry image: B. - 1. sharp = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * sharp + (1 - Mask) * I - - - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. - threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - sharp = img + weight * residual - sharp = np.clip(sharp, 0, 1) - return soft_mask * sharp + (1 - soft_mask) * img - - -class USMSharp(torch.nn.Module): - - def __init__(self, radius=50, sigma=0): - super(USMSharp, self).__init__() - if radius % 2 == 0: - radius += 1 - self.radius = radius - kernel = cv2.getGaussianKernel(radius, sigma) - kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0) - self.register_buffer('kernel', kernel) - - def forward(self, img, weight=0.5, threshold=10): - blur = filter2D(img, self.kernel) - residual = img - blur - - mask = torch.abs(residual) * 255 > threshold - mask = mask.float() - soft_mask = filter2D(mask, self.kernel) - sharp = img + weight * residual - sharp = torch.clip(sharp, 0, 1) - return soft_mask * sharp + (1 - soft_mask) * img - -class USMSharp_npy(): - - def __init__(self, radius=50, sigma=0): - super(USMSharp_npy, self).__init__() - if radius % 2 == 0: - radius += 1 - self.radius = radius - kernel = cv2.getGaussianKernel(radius, sigma) - self.kernel = np.dot(kernel, kernel.transpose()).astype(np.float32) - - def filt(self, img, weight=0.5, threshold=10): - blur = cv2.filter2D(img, -1, self.kernel) - residual = img - blur - - mask = np.abs(residual) * 255 > threshold - mask = mask.astype(np.float32) - soft_mask = cv2.filter2D(mask, -1, self.kernel) - sharp = img + weight * residual - sharp = np.clip(sharp, 0, 1) - return soft_mask * sharp + (1 - soft_mask) * img - diff --git a/spaces/Ekimetrics/Biomap/biomap/app.py b/spaces/Ekimetrics/Biomap/biomap/app.py deleted file mode 100644 index dc89b4663abdd834644f7509e88a4e5889725e52..0000000000000000000000000000000000000000 --- a/spaces/Ekimetrics/Biomap/biomap/app.py +++ /dev/null @@ -1,117 +0,0 @@ -from plot_functions import * -import hydra - -import torch -from model import LitUnsupervisedSegmenter -from helper import inference_on_location_and_month, inference_on_location -from plot_functions import segment_region - -from functools import partial -import gradio as gr -import logging -import sys - -import geopandas as gpd -mapbox_access_token = "pk.eyJ1IjoiamVyZW15LWVraW1ldHJpY3MiLCJhIjoiY2xrNjBwNGU2MDRhMjNqbWw0YTJrbnpvNCJ9.poVyIzhJuJmD6ffrL9lm2w" -geo_df = gpd.read_file(gpd.datasets.get_path('naturalearth_cities')) - -def get_geomap(long, lat ): - fig = go.Figure(go.Scattermapbox( - lat=geo_df.geometry.y, - lon=geo_df.geometry.x, - mode='markers', - marker=go.scattermapbox.Marker( - size=14 - ), - 
text=geo_df.name, - )) - - fig.add_trace(go.Scattermapbox(lat=[lat], - lon=[long], - mode='markers', - marker=go.scattermapbox.Marker( - size=14 - ), - marker_color="green", - text=['Actual position'])) - - fig.update_layout( - showlegend=False, - hovermode='closest', - mapbox=dict( - accesstoken=mapbox_access_token, - center=go.layout.mapbox.Center( - lat=lat, - lon=long - ), - zoom=3 - ) - ) - - return fig - - -if __name__ == "__main__": - file_handler = logging.FileHandler(filename='biomap.log') - stdout_handler = logging.StreamHandler(stream=sys.stdout) - handlers = [file_handler, stdout_handler] - - logging.basicConfig(handlers=handlers, encoding='utf-8', level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") - # Initialize hydra with configs - hydra.initialize(config_path="configs", job_name="corine") - cfg = hydra.compose(config_name="my_train_config.yml") - logging.info(f"config : {cfg}") - - nbclasses = cfg.dir_dataset_n_classes - model = LitUnsupervisedSegmenter(nbclasses, cfg) - model = model.cpu() - logging.info(f"Model Initialiazed") - - model_path = "biomap/checkpoint/model/model.pt" - saved_state_dict = torch.load(model_path, map_location=torch.device("cpu")) - logging.info(f"Model weights Loaded") - model.load_state_dict(saved_state_dict) - logging.info(f"Model Loaded") - with gr.Blocks(title="Biomap by Ekimetrics") as demo: - gr.Markdown("

<h1><center>🐢 Biomap by Ekimetrics 🐢</center></h1>

") - gr.Markdown("

<h3><center>Estimate the biodiversity score of any location in the world using land-cover segmentation.</center></h3>

") - gr.Markdown("Land use is divided into 6 differents classes :Each class is assigned a GBS score from 0 to 1") - gr.Markdown("Buildings : 0.1 | Infrastructure : 0.1 | Cultivation : 0.4 | Wetland : 0.9 | Water : 0.9 | Natural green : 1 ") - gr.Markdown("The score is then average on the full image.") - with gr.Tab("Single Image"): - with gr.Row(): - input_map = gr.Plot() - with gr.Column(): - with gr.Row(): - input_latitude = gr.Number(label="lattitude", value=2.98) - input_longitude = gr.Number(label="longitude", value=48.81) - input_date = gr.Textbox(label="start_date", value="2020-03-20") - - single_button = gr.Button("Predict") - with gr.Row(): - raw_image = gr.Image(label = "Localisation visualization") - output_image = gr.Image(label = "Labeled visualisation") - score_biodiv = gr.Number(label = "Biodiversity score") - - with gr.Tab("TimeLapse"): - with gr.Row(): - input_map_2 = gr.Plot() - with gr.Column(): - with gr.Row(): - timelapse_input_latitude = gr.Number(value=2.98, label="Latitude") - timelapse_input_longitude = gr.Number(value=48.81, label="Longitude") - with gr.Row(): - timelapse_start_date = gr.Dropdown(choices=[2017,2018,2019,2020,2021,2022,2023], value=2020, label="Start Date") - timelapse_end_date = gr.Dropdown(choices=[2017,2018,2019,2020,2021,2022,2023], value=2021, label="End Date") - segmentation = gr.Radio(choices=['month', 'year', '2months'], value='year', label="Interval of time between two segmentation") - timelapse_button = gr.Button(value="Predict") - map = gr.Plot() - - demo.load(get_geomap, [input_latitude, input_longitude], input_map) - single_button.click(get_geomap, [input_latitude, input_longitude], input_map) - single_button.click(partial(inference_on_location_and_month, model), inputs=[input_latitude, input_longitude, input_date], outputs=[raw_image, output_image,score_biodiv]) - - demo.load(get_geomap, [timelapse_input_latitude, timelapse_input_longitude], input_map_2) - timelapse_button.click(get_geomap, [timelapse_input_latitude, timelapse_input_longitude], input_map_2) - timelapse_button.click(partial(inference_on_location, model), inputs=[timelapse_input_latitude, timelapse_input_longitude, timelapse_start_date, timelapse_end_date,segmentation], outputs=[map]) - demo.launch() diff --git a/spaces/EleutherAI/VQGAN_CLIP/taming-transformers/taming/modules/transformer/permuter.py b/spaces/EleutherAI/VQGAN_CLIP/taming-transformers/taming/modules/transformer/permuter.py deleted file mode 100644 index 0d43bb135adde38d94bf18a7e5edaa4523cd95cf..0000000000000000000000000000000000000000 --- a/spaces/EleutherAI/VQGAN_CLIP/taming-transformers/taming/modules/transformer/permuter.py +++ /dev/null @@ -1,248 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np - - -class AbstractPermuter(nn.Module): - def __init__(self, *args, **kwargs): - super().__init__() - def forward(self, x, reverse=False): - raise NotImplementedError - - -class Identity(AbstractPermuter): - def __init__(self): - super().__init__() - - def forward(self, x, reverse=False): - return x - - -class Subsample(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - C = 1 - indices = np.arange(H*W).reshape(C,H,W) - while min(H, W) > 1: - indices = indices.reshape(C,H//2,2,W//2,2) - indices = indices.transpose(0,2,4,1,3) - indices = indices.reshape(C*4,H//2, W//2) - H = H//2 - W = W//2 - C = C*4 - assert H == W == 1 - idx = torch.tensor(indices.ravel()) - self.register_buffer('forward_shuffle_idx', - nn.Parameter(idx, requires_grad=False)) - 
self.register_buffer('backward_shuffle_idx', - nn.Parameter(torch.argsort(idx), requires_grad=False)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -def mortonify(i, j): - """(i,j) index to linear morton code""" - i = np.uint64(i) - j = np.uint64(j) - - z = np.uint(0) - - for pos in range(32): - z = (z | - ((j & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos)) | - ((i & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos+1)) - ) - return z - - -class ZCurve(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)] - idx = np.argsort(reverseidx) - idx = torch.tensor(idx) - reverseidx = torch.tensor(reverseidx) - self.register_buffer('forward_shuffle_idx', - idx) - self.register_buffer('backward_shuffle_idx', - reverseidx) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class SpiralOut(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - assert H == W - size = W - indices = np.arange(size*size).reshape(size,size) - - i0 = size//2 - j0 = size//2-1 - - i = i0 - j = j0 - - idx = [indices[i0, j0]] - step_mult = 0 - for c in range(1, size//2+1): - step_mult += 1 - # steps left - for k in range(step_mult): - i = i - 1 - j = j - idx.append(indices[i, j]) - - # step down - for k in range(step_mult): - i = i - j = j + 1 - idx.append(indices[i, j]) - - step_mult += 1 - if c < size//2: - # step right - for k in range(step_mult): - i = i + 1 - j = j - idx.append(indices[i, j]) - - # step up - for k in range(step_mult): - i = i - j = j - 1 - idx.append(indices[i, j]) - else: - # end reached - for k in range(step_mult-1): - i = i + 1 - idx.append(indices[i, j]) - - assert len(idx) == size*size - idx = torch.tensor(idx) - self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class SpiralIn(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - assert H == W - size = W - indices = np.arange(size*size).reshape(size,size) - - i0 = size//2 - j0 = size//2-1 - - i = i0 - j = j0 - - idx = [indices[i0, j0]] - step_mult = 0 - for c in range(1, size//2+1): - step_mult += 1 - # steps left - for k in range(step_mult): - i = i - 1 - j = j - idx.append(indices[i, j]) - - # step down - for k in range(step_mult): - i = i - j = j + 1 - idx.append(indices[i, j]) - - step_mult += 1 - if c < size//2: - # step right - for k in range(step_mult): - i = i + 1 - j = j - idx.append(indices[i, j]) - - # step up - for k in range(step_mult): - i = i - j = j - 1 - idx.append(indices[i, j]) - else: - # end reached - for k in range(step_mult-1): - i = i + 1 - idx.append(indices[i, j]) - - assert len(idx) == size*size - idx = idx[::-1] - idx = torch.tensor(idx) - self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class Random(nn.Module): - def __init__(self, H, W): - super().__init__() - indices = np.random.RandomState(1).permutation(H*W) - idx = torch.tensor(indices.ravel()) - 
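        # RandomState(1) fixes the seed, so the "random" permutation is deterministic
        # and stays consistent between training runs and saved checkpoints.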
self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -class AlternateParsing(AbstractPermuter): - def __init__(self, H, W): - super().__init__() - indices = np.arange(W*H).reshape(H,W) - for i in range(1, H, 2): - indices[i, :] = indices[i, ::-1] - idx = indices.flatten() - assert len(idx) == H*W - idx = torch.tensor(idx) - self.register_buffer('forward_shuffle_idx', idx) - self.register_buffer('backward_shuffle_idx', torch.argsort(idx)) - - def forward(self, x, reverse=False): - if not reverse: - return x[:, self.forward_shuffle_idx] - else: - return x[:, self.backward_shuffle_idx] - - -if __name__ == "__main__": - p0 = AlternateParsing(16, 16) - print(p0.forward_shuffle_idx) - print(p0.backward_shuffle_idx) - - x = torch.randint(0, 768, size=(11, 256)) - y = p0(x) - xre = p0(y, reverse=True) - assert torch.equal(x, xre) - - p1 = SpiralOut(2, 2) - print(p1.forward_shuffle_idx) - print(p1.backward_shuffle_idx) diff --git a/spaces/Epoching/GLIDE_Inpaint/glide_text2im/unet.py b/spaces/Epoching/GLIDE_Inpaint/glide_text2im/unet.py deleted file mode 100644 index b61437a44ef7510e0c62afaae070deabc24c42bb..0000000000000000000000000000000000000000 --- a/spaces/Epoching/GLIDE_Inpaint/glide_text2im/unet.py +++ /dev/null @@ -1,635 +0,0 @@ -import math -from abc import abstractmethod - -import torch as th -import torch.nn as nn -import torch.nn.functional as F - -from .fp16_util import convert_module_to_f16, convert_module_to_f32 -from .nn import avg_pool_nd, conv_nd, linear, normalization, timestep_embedding, zero_module - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, encoder_out=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, AttentionBlock): - x = layer(x, encoder_out) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate(x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest") - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. 
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. - """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels, swish=1.0), - nn.Identity(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0), - nn.SiLU() if use_scale_shift_norm else nn.Identity(), - nn.Dropout(p=dropout), - zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. 
- """ - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. - """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - encoder_channels=None, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels, swish=0.0) - self.qkv = conv_nd(1, channels, channels * 3, 1) - self.attention = QKVAttention(self.num_heads) - - if encoder_channels is not None: - self.encoder_kv = conv_nd(1, encoder_channels, channels * 2, 1) - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x, encoder_out=None): - b, c, *spatial = x.shape - qkv = self.qkv(self.norm(x).view(b, c, -1)) - if encoder_out is not None: - encoder_out = self.encoder_kv(encoder_out) - h = self.attention(qkv, encoder_out) - else: - h = self.attention(qkv) - h = self.proj_out(h) - return x + h.reshape(b, c, *spatial) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv, encoder_kv=None): - """ - Apply QKV attention. - - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - if encoder_kv is not None: - assert encoder_kv.shape[1] == self.n_heads * ch * 2 - ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch, dim=1) - k = th.cat([ek, k], dim=-1) - v = th.cat([ev, v], dim=-1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. 
- :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - """ - - def __init__( - self, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - encoder_channels=None, - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - if self.num_classes is not None: - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - - ch = input_ch = int(channel_mult[0] * model_channels) - self.input_blocks = nn.ModuleList( - [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))] - ) - self._feature_size = ch - input_block_chans = [ch] - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=int(mult * model_channels), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = int(mult * model_channels) - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - encoder_channels=encoder_channels, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - 
use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - encoder_channels=encoder_channels, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(num_res_blocks + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=int(model_channels * mult), - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = int(model_channels * mult) - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=num_head_channels, - encoder_channels=encoder_channels, - ) - ) - if level and i == num_res_blocks: - out_ch = ch - layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch, swish=1.0), - nn.Identity(), - zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)), - ) - self.use_fp16 = use_fp16 - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps, y=None): - """ - Apply the model to an input batch. - - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param y: an [N] Tensor of labels, if class-conditional. - :return: an [N x C x ...] Tensor of outputs. - """ - assert (y is not None) == ( - self.num_classes is not None - ), "must specify y if and only if the model is class-conditional" - - hs = [] - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - if self.num_classes is not None: - assert y.shape == (x.shape[0],) - emb = emb + self.label_emb(y) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - hs.append(h) - h = self.middle_block(h, emb) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb) - h = h.type(x.dtype) - return self.out(h) - -class SuperResUNetModel(UNetModel): - """ - A UNetModel that performs super-resolution. - - Expects an extra kwarg `low_res` to condition on a low-resolution image. 
- """ - - def __init__(self, *args, **kwargs): - if "in_channels" in kwargs: - kwargs = dict(kwargs) - kwargs["in_channels"] = kwargs["in_channels"] * 2 - else: - # Curse you, Python. Or really, just curse positional arguments :|. - args = list(args) - args[1] = args[1] * 2 - super().__init__(*args, **kwargs) - - def forward(self, x, timesteps, low_res=None, **kwargs): - _, _, new_height, new_width = x.shape - upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear") - x = th.cat([x, upsampled], dim=1) - return super().forward(x, timesteps, **kwargs) - - -class InpaintUNetModel(UNetModel): - """ - A UNetModel which can perform inpainting. - """ - - def __init__(self, *args, **kwargs): - if "in_channels" in kwargs: - kwargs = dict(kwargs) - kwargs["in_channels"] = kwargs["in_channels"] * 2 + 1 - else: - # Curse you, Python. Or really, just curse positional arguments :|. - args = list(args) - args[1] = args[1] * 2 + 1 - super().__init__(*args, **kwargs) - - def forward(self, x, timesteps, inpaint_image=None, inpaint_mask=None, **kwargs): - if inpaint_image is None: - inpaint_image = th.zeros_like(x) - if inpaint_mask is None: - inpaint_mask = th.zeros_like(x[:, :1]) - return super().forward( - th.cat([x, inpaint_image * inpaint_mask, inpaint_mask], dim=1), - timesteps, - **kwargs, - ) - - -class SuperResInpaintUNetModel(UNetModel): - """ - A UNetModel which can perform both upsampling and inpainting. - """ - - def __init__(self, *args, **kwargs): - if "in_channels" in kwargs: - kwargs = dict(kwargs) - kwargs["in_channels"] = kwargs["in_channels"] * 3 + 1 - else: - # Curse you, Python. Or really, just curse positional arguments :|. - args = list(args) - args[1] = args[1] * 3 + 1 - super().__init__(*args, **kwargs) - - def forward( - self, - x, - timesteps, - inpaint_image=None, - inpaint_mask=None, - low_res=None, - **kwargs, - ): - if inpaint_image is None: - inpaint_image = th.zeros_like(x) - if inpaint_mask is None: - inpaint_mask = th.zeros_like(x[:, :1]) - _, _, new_height, new_width = x.shape - upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear") - return super().forward( - th.cat([x, inpaint_image * inpaint_mask, inpaint_mask, upsampled], dim=1), - timesteps, - **kwargs, - ) diff --git a/spaces/EsoCode/text-generation-webui/modules/exllama_hf.py b/spaces/EsoCode/text-generation-webui/modules/exllama_hf.py deleted file mode 100644 index 181a77a6d7d1ab6648130606539ad183929d36f7..0000000000000000000000000000000000000000 --- a/spaces/EsoCode/text-generation-webui/modules/exllama_hf.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from pathlib import Path -from typing import Any, Dict, Optional, Union - -import torch -from torch.nn import CrossEntropyLoss -from transformers import GenerationConfig, PretrainedConfig, PreTrainedModel -from transformers.modeling_outputs import CausalLMOutputWithPast - -from modules import shared -from modules.logging_colors import logger - -try: - from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig -except: - logger.warning('Exllama module failed to load. Will attempt to load from repositories.') - try: - from modules.relative_imports import RelativeImport - - with RelativeImport("repositories/exllama"): - from model import ExLlama, ExLlamaCache, ExLlamaConfig - except: - logger.error("Could not find repositories/exllama/. 
Make sure that exllama is cloned inside repositories/ and is up to date.") - raise - - -class ExllamaHF(PreTrainedModel): - def __init__(self, config: ExLlamaConfig): - super().__init__(PretrainedConfig()) - self.ex_config = config - self.ex_model = ExLlama(self.ex_config) - self.generation_config = GenerationConfig() - self.lora = None - - def _validate_model_class(self): - pass - - def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): - pass - - def prepare_inputs_for_generation(self, input_ids, **kwargs): - return {'input_ids': input_ids, **kwargs} - - @property - def device(self) -> torch.device: - return torch.device(0) - - def __call__(self, *args, **kwargs): - # TODO: Some decoding methods (such as Contrastive Search) may not work at this time - assert len(args) == 0, 'no *args should be passed to forward' - use_cache = kwargs.get('use_cache', True) - labels = kwargs.get('labels', None) - seq = kwargs['input_ids'][0].tolist() - cache = kwargs['past_key_values'] if 'past_key_values' in kwargs else None - if cache is None: - cache = ExLlamaCache(self.ex_model) - self.ex_model.forward(torch.tensor([seq[:-1]], dtype=torch.long), cache, preprocess_only=True, lora=self.lora) - - logits = self.ex_model.forward(torch.tensor([seq[-1:]], dtype=torch.long), cache, lora=self.lora).to(kwargs['input_ids'].device) - - loss = None - if labels is not None: - # Shift so that tokens < n predict n - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - loss_fct = CrossEntropyLoss() - shift_logits = shift_logits.view(-1, logits.shape[-1]) - shift_labels = shift_labels.view(-1) - # Enable model parallelism - shift_labels = shift_labels.to(shift_logits.device) - loss = loss_fct(shift_logits, shift_labels) - - return CausalLMOutputWithPast(logits=logits, past_key_values=cache if use_cache else None) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): - assert len(model_args) == 0 and len(kwargs) == 0, "extra args is currently not supported" - if isinstance(pretrained_model_name_or_path, str): - pretrained_model_name_or_path = Path(pretrained_model_name_or_path) - - pretrained_model_name_or_path = Path(f'{shared.args.model_dir}') / Path(pretrained_model_name_or_path) - config = ExLlamaConfig(pretrained_model_name_or_path / 'config.json') - - # from 'oobabooga/text-generation-webui/modules/exllama.py' - weight_path = None - for ext in ['.safetensors', '.pt', '.bin']: - found = list(pretrained_model_name_or_path.glob(f"*{ext}")) - if len(found) > 0: - weight_path = found[-1] - break - assert weight_path is not None, f'could not find weight in "{pretrained_model_name_or_path}"' - - config.model_path = str(weight_path) - config.max_seq_len = shared.args.max_seq_len - config.compress_pos_emb = shared.args.compress_pos_emb - if shared.args.gpu_split: - config.set_auto_map(shared.args.gpu_split) - config.gpu_peer_fix = True - if torch.version.hip: - config.rmsnorm_no_half2 = True - config.rope_no_half2 = True - config.matmul_no_half2 = True - config.silu_no_half2 = True - - # This slowes down a bit but align better with autogptq generation. 
- # TODO: Should give user choice to tune the exllama config - # config.fused_attn = False - # config.fused_mlp_thd = 0 - - return ExllamaHF(config) diff --git a/spaces/EuroPython2022/automatic-speech-recognition-with-next-gen-kaldi/test_wavs/aishell2/README.md b/spaces/EuroPython2022/automatic-speech-recognition-with-next-gen-kaldi/test_wavs/aishell2/README.md deleted file mode 100644 index 40a16b2ac43de0a40248b86e198e7077b8e44ee6..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/automatic-speech-recognition-with-next-gen-kaldi/test_wavs/aishell2/README.md +++ /dev/null @@ -1,2 +0,0 @@ -Files are downloaded from -https://huggingface.co/yuekai/icefall-asr-aishell2-pruned-transducer-stateless5-B-2022-07-12/tree/main/test_wavs diff --git a/spaces/EuroPython2022/clickbaitonator/fudge/evaluate_poetry.py b/spaces/EuroPython2022/clickbaitonator/fudge/evaluate_poetry.py deleted file mode 100644 index 4656878c39c53dc4647ee741e953234ec3f49393..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/clickbaitonator/fudge/evaluate_poetry.py +++ /dev/null @@ -1,115 +0,0 @@ -import os -import random -import time -import pickle -import math -from argparse import ArgumentParser -import string -from collections import defaultdict - -from tqdm import tqdm -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline, set_seed, GPT2Tokenizer, GPT2Model - -from data import Dataset, load_rhyme_info -from model import Model -from util import save_checkpoint, ProgressMeter, AverageMeter, num_params -from constants import * -from poetry_util import get_rhymes, count_syllables -from predict_poetry import predict_couplet - -def main(args): - with open(args.dataset_info, 'rb') as rf: - dataset_info = pickle.load(rf) - gpt_tokenizer = AutoTokenizer.from_pretrained(args.model_string) - gpt_tokenizer.add_special_tokens({'pad_token': PAD_TOKEN}) - gpt_pad_id = gpt_tokenizer.encode(PAD_TOKEN)[0] - gpt_model = AutoModelWithLMHead.from_pretrained(args.model_string).to(args.device) - gpt_model.eval() - - checkpoint = torch.load(args.iambic_ckpt, map_location=args.device) - model_args = checkpoint['args'] - iambic_model = Model(model_args, gpt_pad_id, len(dataset_info.index2word)) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway - iambic_model.load_state_dict(checkpoint['state_dict']) - iambic_model = iambic_model.to(args.device) - iambic_model.eval() - if args.verbose: - print("=> loaded checkpoint '{}' (epoch {})" - .format(args.iambic_ckpt, checkpoint['epoch'])) - print('iambic model num params', num_params(iambic_model)) - - with open(args.rhyme_info, 'rb') as rf: - rhyme_info = pickle.load(rf) - checkpoint = torch.load(args.rhyme_ckpt, map_location=args.device) - model_args = checkpoint['args'] - rhyme_model = Model(model_args, gpt_pad_id, len(dataset_info.index2word), rhyme_group_size=len(rhyme_info.index2rhyme_group), verbose=args.verbose) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway - rhyme_model.load_state_dict(checkpoint['state_dict']) - rhyme_model = rhyme_model.to(args.device) - rhyme_model.eval() - if args.verbose: - print("=> loaded checkpoint '{}' (epoch {})" - .format(args.rhyme_ckpt, checkpoint['epoch'])) - print('rhyme model num params', num_params(rhyme_model)) - - checkpoint = torch.load(args.newline_ckpt, map_location=args.device) - model_args = checkpoint['args'] - newline_model = 
Model(model_args, gpt_pad_id, len(dataset_info.index2word)) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway - newline_model.load_state_dict(checkpoint['state_dict']) - newline_model = newline_model.to(args.device) - newline_model.eval() - if args.verbose: - print("=> loaded checkpoint '{}' (epoch {})" - .format(args.newline_ckpt, checkpoint['epoch'])) - print('iambic model num params', num_params(newline_model)) - - with open(args.prefix_file, 'r') as rf: - lines = rf.readlines() - for line in tqdm(lines, total=len(lines)): - couplet = predict_couplet(gpt_model, - gpt_tokenizer, - iambic_model, - rhyme_model, - newline_model, - [line], - dataset_info, - rhyme_info, - args.precondition_topk, - args.topk, - condition_lambda=args.condition_lambda, - device=args.device) - assert len(couplet) == 2 - print(couplet[1].strip().replace('\n', '')) - - -if __name__=='__main__': - parser = ArgumentParser() - - # DATA - parser.add_argument('--iambic_ckpt', type=str, required=True) - parser.add_argument('--rhyme_ckpt', type=str, required=True) - parser.add_argument('--newline_ckpt', type=str, required=True) - parser.add_argument('--dataset_info', type=str, required=True, help='saved dataset info') - parser.add_argument('--rhyme_info', type=str, required=True, help='saved rhyme info') - parser.add_argument('--model_string', type=str, default='gpt2-medium') - - parser.add_argument('--prefix_file', type=str, default=None, required=True, help='file of prefix lines for couplets') - - parser.add_argument('--precondition_topk', type=int, default=200, help='consider top k outputs from gpt at each step before conditioning and re-pruning') - parser.add_argument('--topk', type=int, default=10, help='consider top k outputs from gpt at each step') - parser.add_argument('--condition_lambda', type=float, default=1.0, help='lambda weight on conditioning model') - - parser.add_argument('--seed', type=int, default=1, help='random seed') - parser.add_argument('--device', type=str, default='cuda', choices=['cpu', 'cuda']) - parser.add_argument('--debug', action='store_true', default=False) - parser.add_argument('--verbose', action='store_true', default=False) - - args = parser.parse_args() - - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - - main(args) \ No newline at end of file diff --git a/spaces/Felix123456/bingo/src/components/ui/dropdown-menu.tsx b/spaces/Felix123456/bingo/src/components/ui/dropdown-menu.tsx deleted file mode 100644 index 184d4e6007ef85187446362f69532ab077897fea..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/components/ui/dropdown-menu.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DropdownMenuPrimitive from '@radix-ui/react-dropdown-menu' - -import { cn } from '@/lib/utils' - -const DropdownMenu = DropdownMenuPrimitive.Root - -const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger - -const DropdownMenuGroup = DropdownMenuPrimitive.Group - -const DropdownMenuPortal = DropdownMenuPrimitive.Portal - -const DropdownMenuSub = DropdownMenuPrimitive.Sub - -const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup - -const DropdownMenuSubContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DropdownMenuSubContent.displayName = - DropdownMenuPrimitive.SubContent.displayName - -const DropdownMenuContent = React.forwardRef< - React.ElementRef, - 
React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - - - -)) -DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName - -const DropdownMenuItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - -)) -DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName - -const DropdownMenuLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - -)) -DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName - -const DropdownMenuSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName - -const DropdownMenuShortcut = ({ - className, - ...props -}: React.HTMLAttributes) => { - return ( - - ) -} -DropdownMenuShortcut.displayName = 'DropdownMenuShortcut' - -export { - DropdownMenu, - DropdownMenuTrigger, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuLabel, - DropdownMenuSeparator, - DropdownMenuShortcut, - DropdownMenuGroup, - DropdownMenuPortal, - DropdownMenuSub, - DropdownMenuSubContent, - DropdownMenuRadioGroup -} diff --git a/spaces/Felix123456/bingo/src/pages/api/create.ts b/spaces/Felix123456/bingo/src/pages/api/create.ts deleted file mode 100644 index 508fa97ef609cbb215a61085711638e116235ebe..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/pages/api/create.ts +++ /dev/null @@ -1,31 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch, debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' - -// const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create' -const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const headers = createHeaders(req.cookies) - - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - - debug('headers', headers) - const response = await fetch(API_ENDPOINT, { method: 'GET', headers }) - .then((res) => res.text()) - - res.end(response) - } catch (e) { - return res.end(JSON.stringify({ - result: { - value: 'UnauthorizedRequest', - message: `${e}` - } - })) - } -} diff --git a/spaces/Ferion/image-matting-app/ppmatting/transforms/transforms.py b/spaces/Ferion/image-matting-app/ppmatting/transforms/transforms.py deleted file mode 100644 index afd28b4917a890890820e56785b81c841b2d387a..0000000000000000000000000000000000000000 --- a/spaces/Ferion/image-matting-app/ppmatting/transforms/transforms.py +++ /dev/null @@ -1,791 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import random -import string - -import cv2 -import numpy as np -from paddleseg.transforms import functional -from paddleseg.cvlibs import manager -from paddleseg.utils import seg_env -from PIL import Image - - -@manager.TRANSFORMS.add_component -class Compose: - """ - Do transformation on input data with corresponding pre-processing and augmentation operations. - The shape of input data to all operations is [height, width, channels]. - """ - - def __init__(self, transforms, to_rgb=True): - if not isinstance(transforms, list): - raise TypeError('The transforms must be a list!') - self.transforms = transforms - self.to_rgb = to_rgb - - def __call__(self, data): - """ - Args: - data (dict): The data to transform. - - Returns: - dict: Data after transformation - """ - if 'trans_info' not in data: - data['trans_info'] = [] - for op in self.transforms: - data = op(data) - if data is None: - return None - - data['img'] = np.transpose(data['img'], (2, 0, 1)) - for key in data.get('gt_fields', []): - if len(data[key].shape) == 2: - continue - data[key] = np.transpose(data[key], (2, 0, 1)) - - return data - - -@manager.TRANSFORMS.add_component -class LoadImages: - def __init__(self, to_rgb=False): - self.to_rgb = to_rgb - - def __call__(self, data): - if isinstance(data['img'], str): - data['img'] = cv2.imread(data['img']) - for key in data.get('gt_fields', []): - if isinstance(data[key], str): - data[key] = cv2.imread(data[key], cv2.IMREAD_UNCHANGED) - # if alpha and trimap has 3 channels, extract one. - if key in ['alpha', 'trimap']: - if len(data[key].shape) > 2: - data[key] = data[key][:, :, 0] - - if self.to_rgb: - data['img'] = cv2.cvtColor(data['img'], cv2.COLOR_BGR2RGB) - for key in data.get('gt_fields', []): - if len(data[key].shape) == 2: - continue - data[key] = cv2.cvtColor(data[key], cv2.COLOR_BGR2RGB) - - return data - - -@manager.TRANSFORMS.add_component -class Resize: - def __init__(self, target_size=(512, 512), random_interp=False): - if isinstance(target_size, list) or isinstance(target_size, tuple): - if len(target_size) != 2: - raise ValueError( - '`target_size` should include 2 elements, but it is {}'. - format(target_size)) - else: - raise TypeError( - "Type of `target_size` is invalid. It should be list or tuple, but it is {}" - .format(type(target_size))) - - self.target_size = target_size - self.random_interp = random_interp - self.interps = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC] - - def __call__(self, data): - if self.random_interp: - interp = np.random.choice(self.interps) - else: - interp = cv2.INTER_LINEAR - data['trans_info'].append(('resize', data['img'].shape[0:2])) - data['img'] = functional.resize(data['img'], self.target_size, interp) - for key in data.get('gt_fields', []): - if key == 'trimap': - data[key] = functional.resize(data[key], self.target_size, - cv2.INTER_NEAREST) - else: - data[key] = functional.resize(data[key], self.target_size, - interp) - return data - - -@manager.TRANSFORMS.add_component -class RandomResize: - """ - Resize image to a size determinned by `scale` and `size`. - - Args: - size(tuple|list): The reference size to resize. A tuple or list with length 2. - scale(tupel|list, optional): A range of scale base on `size`. A tuple or list with length 2. Default: None. 
- """ - - def __init__(self, size=None, scale=None): - if isinstance(size, list) or isinstance(size, tuple): - if len(size) != 2: - raise ValueError( - '`size` should include 2 elements, but it is {}'.format( - size)) - elif size is not None: - raise TypeError( - "Type of `size` is invalid. It should be list or tuple, but it is {}" - .format(type(size))) - - if scale is not None: - if isinstance(scale, list) or isinstance(scale, tuple): - if len(scale) != 2: - raise ValueError( - '`scale` should include 2 elements, but it is {}'. - format(scale)) - else: - raise TypeError( - "Type of `scale` is invalid. It should be list or tuple, but it is {}" - .format(type(scale))) - self.size = size - self.scale = scale - - def __call__(self, data): - h, w = data['img'].shape[:2] - if self.scale is not None: - scale = np.random.uniform(self.scale[0], self.scale[1]) - else: - scale = 1. - if self.size is not None: - scale_factor = max(self.size[0] / w, self.size[1] / h) - else: - scale_factor = 1 - scale = scale * scale_factor - - w = int(round(w * scale)) - h = int(round(h * scale)) - data['img'] = functional.resize(data['img'], (w, h)) - for key in data.get('gt_fields', []): - if key == 'trimap': - data[key] = functional.resize(data[key], (w, h), - cv2.INTER_NEAREST) - else: - data[key] = functional.resize(data[key], (w, h)) - return data - - -@manager.TRANSFORMS.add_component -class ResizeByLong: - """ - Resize the long side of an image to given size, and then scale the other side proportionally. - - Args: - long_size (int): The target size of long side. - """ - - def __init__(self, long_size): - self.long_size = long_size - - def __call__(self, data): - data['trans_info'].append(('resize', data['img'].shape[0:2])) - data['img'] = functional.resize_long(data['img'], self.long_size) - for key in data.get('gt_fields', []): - if key == 'trimap': - data[key] = functional.resize_long(data[key], self.long_size, - cv2.INTER_NEAREST) - else: - data[key] = functional.resize_long(data[key], self.long_size) - return data - - -@manager.TRANSFORMS.add_component -class ResizeByShort: - """ - Resize the short side of an image to given size, and then scale the other side proportionally. - - Args: - short_size (int): The target size of short side. - """ - - def __init__(self, short_size): - self.short_size = short_size - - def __call__(self, data): - data['trans_info'].append(('resize', data['img'].shape[0:2])) - data['img'] = functional.resize_short(data['img'], self.short_size) - for key in data.get('gt_fields', []): - if key == 'trimap': - data[key] = functional.resize_short(data[key], self.short_size, - cv2.INTER_NEAREST) - else: - data[key] = functional.resize_short(data[key], self.short_size) - return data - - -@manager.TRANSFORMS.add_component -class ResizeToIntMult: - """ - Resize to some int muitple, d.g. 32. - """ - - def __init__(self, mult_int=32): - self.mult_int = mult_int - - def __call__(self, data): - data['trans_info'].append(('resize', data['img'].shape[0:2])) - - h, w = data['img'].shape[0:2] - rw = w - w % self.mult_int - rh = h - h % self.mult_int - data['img'] = functional.resize(data['img'], (rw, rh)) - for key in data.get('gt_fields', []): - if key == 'trimap': - data[key] = functional.resize(data[key], (rw, rh), - cv2.INTER_NEAREST) - else: - data[key] = functional.resize(data[key], (rw, rh)) - - return data - - -@manager.TRANSFORMS.add_component -class Normalize: - """ - Normalize an image. - - Args: - mean (list, optional): The mean value of a data set. Default: [0.5, 0.5, 0.5]. 
- std (list, optional): The standard deviation of a data set. Default: [0.5, 0.5, 0.5]. - - Raises: - ValueError: When mean/std is not list or any value in std is 0. - """ - - def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)): - self.mean = mean - self.std = std - if not (isinstance(self.mean, - (list, tuple)) and isinstance(self.std, - (list, tuple))): - raise ValueError( - "{}: input type is invalid. It should be list or tuple".format( - self)) - from functools import reduce - if reduce(lambda x, y: x * y, self.std) == 0: - raise ValueError('{}: std is invalid!'.format(self)) - - def __call__(self, data): - mean = np.array(self.mean)[np.newaxis, np.newaxis, :] - std = np.array(self.std)[np.newaxis, np.newaxis, :] - data['img'] = functional.normalize(data['img'], mean, std) - if 'fg' in data.get('gt_fields', []): - data['fg'] = functional.normalize(data['fg'], mean, std) - if 'bg' in data.get('gt_fields', []): - data['bg'] = functional.normalize(data['bg'], mean, std) - - return data - - -@manager.TRANSFORMS.add_component -class RandomCropByAlpha: - """ - Randomly crop while centered on uncertain area by a certain probability. - - Args: - crop_size (tuple|list): The size you want to crop from image. - p (float): The probability centered on uncertain area. - - """ - - def __init__(self, crop_size=((320, 320), (480, 480), (640, 640)), - prob=0.5): - self.crop_size = crop_size - self.prob = prob - - def __call__(self, data): - idex = np.random.randint(low=0, high=len(self.crop_size)) - crop_w, crop_h = self.crop_size[idex] - - img_h = data['img'].shape[0] - img_w = data['img'].shape[1] - if np.random.rand() < self.prob: - crop_center = np.where((data['alpha'] > 0) & (data['alpha'] < 255)) - center_h_array, center_w_array = crop_center - if len(center_h_array) == 0: - return data - rand_ind = np.random.randint(len(center_h_array)) - center_h = center_h_array[rand_ind] - center_w = center_w_array[rand_ind] - delta_h = crop_h // 2 - delta_w = crop_w // 2 - start_h = max(0, center_h - delta_h) - start_w = max(0, center_w - delta_w) - else: - start_h = 0 - start_w = 0 - if img_h > crop_h: - start_h = np.random.randint(img_h - crop_h + 1) - if img_w > crop_w: - start_w = np.random.randint(img_w - crop_w + 1) - - end_h = min(img_h, start_h + crop_h) - end_w = min(img_w, start_w + crop_w) - - data['img'] = data['img'][start_h:end_h, start_w:end_w] - for key in data.get('gt_fields', []): - data[key] = data[key][start_h:end_h, start_w:end_w] - - return data - - -@manager.TRANSFORMS.add_component -class RandomCrop: - """ - Randomly crop - - Args: - crop_size (tuple|list): The size you want to crop from image. - """ - - def __init__(self, crop_size=((320, 320), (480, 480), (640, 640))): - if not isinstance(crop_size[0], (list, tuple)): - crop_size = [crop_size] - self.crop_size = crop_size - - def __call__(self, data): - idex = np.random.randint(low=0, high=len(self.crop_size)) - crop_w, crop_h = self.crop_size[idex] - img_h, img_w = data['img'].shape[0:2] - - start_h = 0 - start_w = 0 - if img_h > crop_h: - start_h = np.random.randint(img_h - crop_h + 1) - if img_w > crop_w: - start_w = np.random.randint(img_w - crop_w + 1) - - end_h = min(img_h, start_h + crop_h) - end_w = min(img_w, start_w + crop_w) - - data['img'] = data['img'][start_h:end_h, start_w:end_w] - for key in data.get('gt_fields', []): - data[key] = data[key][start_h:end_h, start_w:end_w] - - return data - - -@manager.TRANSFORMS.add_component -class LimitLong: - """ - Limit the long edge of image. 
- - If the long edge is larger than max_long, resize the long edge - to max_long, while scale the short edge proportionally. - - If the long edge is smaller than min_long, resize the long edge - to min_long, while scale the short edge proportionally. - - Args: - max_long (int, optional): If the long edge of image is larger than max_long, - it will be resize to max_long. Default: None. - min_long (int, optional): If the long edge of image is smaller than min_long, - it will be resize to min_long. Default: None. - """ - - def __init__(self, max_long=None, min_long=None): - if max_long is not None: - if not isinstance(max_long, int): - raise TypeError( - "Type of `max_long` is invalid. It should be int, but it is {}" - .format(type(max_long))) - if min_long is not None: - if not isinstance(min_long, int): - raise TypeError( - "Type of `min_long` is invalid. It should be int, but it is {}" - .format(type(min_long))) - if (max_long is not None) and (min_long is not None): - if min_long > max_long: - raise ValueError( - '`max_long should not smaller than min_long, but they are {} and {}' - .format(max_long, min_long)) - self.max_long = max_long - self.min_long = min_long - - def __call__(self, data): - h, w = data['img'].shape[:2] - long_edge = max(h, w) - target = long_edge - if (self.max_long is not None) and (long_edge > self.max_long): - target = self.max_long - elif (self.min_long is not None) and (long_edge < self.min_long): - target = self.min_long - - data['trans_info'].append(('resize', data['img'].shape[0:2])) - if target != long_edge: - data['img'] = functional.resize_long(data['img'], target) - for key in data.get('gt_fields', []): - if key == 'trimap': - data[key] = functional.resize_long(data[key], target, - cv2.INTER_NEAREST) - else: - data[key] = functional.resize_long(data[key], target) - - return data - - -@manager.TRANSFORMS.add_component -class LimitShort: - """ - Limit the short edge of image. - - If the short edge is larger than max_short, resize the short edge - to max_short, while scale the long edge proportionally. - - If the short edge is smaller than min_short, resize the short edge - to min_short, while scale the long edge proportionally. - - Args: - max_short (int, optional): If the short edge of image is larger than max_short, - it will be resize to max_short. Default: None. - min_short (int, optional): If the short edge of image is smaller than min_short, - it will be resize to min_short. Default: None. - """ - - def __init__(self, max_short=None, min_short=None): - if max_short is not None: - if not isinstance(max_short, int): - raise TypeError( - "Type of `max_short` is invalid. It should be int, but it is {}" - .format(type(max_short))) - if min_short is not None: - if not isinstance(min_short, int): - raise TypeError( - "Type of `min_short` is invalid. 
It should be int, but it is {}" - .format(type(min_short))) - if (max_short is not None) and (min_short is not None): - if min_short > max_short: - raise ValueError( - '`max_short should not smaller than min_short, but they are {} and {}' - .format(max_short, min_short)) - self.max_short = max_short - self.min_short = min_short - - def __call__(self, data): - h, w = data['img'].shape[:2] - short_edge = min(h, w) - target = short_edge - if (self.max_short is not None) and (short_edge > self.max_short): - target = self.max_short - elif (self.min_short is not None) and (short_edge < self.min_short): - target = self.min_short - - data['trans_info'].append(('resize', data['img'].shape[0:2])) - if target != short_edge: - data['img'] = functional.resize_short(data['img'], target) - for key in data.get('gt_fields', []): - if key == 'trimap': - data[key] = functional.resize_short(data[key], target, - cv2.INTER_NEAREST) - else: - data[key] = functional.resize_short(data[key], target) - - return data - - -@manager.TRANSFORMS.add_component -class RandomHorizontalFlip: - """ - Flip an image horizontally with a certain probability. - - Args: - prob (float, optional): A probability of horizontally flipping. Default: 0.5. - """ - - def __init__(self, prob=0.5): - self.prob = prob - - def __call__(self, data): - if random.random() < self.prob: - data['img'] = functional.horizontal_flip(data['img']) - for key in data.get('gt_fields', []): - data[key] = functional.horizontal_flip(data[key]) - - return data - - -@manager.TRANSFORMS.add_component -class RandomBlur: - """ - Blurring an image by a Gaussian function with a certain probability. - - Args: - prob (float, optional): A probability of blurring an image. Default: 0.1. - """ - - def __init__(self, prob=0.1): - self.prob = prob - - def __call__(self, data): - if self.prob <= 0: - n = 0 - elif self.prob >= 1: - n = 1 - else: - n = int(1.0 / self.prob) - if n > 0: - if np.random.randint(0, n) == 0: - radius = np.random.randint(3, 10) - if radius % 2 != 1: - radius = radius + 1 - if radius > 9: - radius = 9 - data['img'] = cv2.GaussianBlur(data['img'], (radius, radius), 0, - 0) - for key in data.get('gt_fields', []): - if key == 'trimap': - continue - data[key] = cv2.GaussianBlur(data[key], (radius, radius), 0, - 0) - return data - - -@manager.TRANSFORMS.add_component -class RandomDistort: - """ - Distort an image with random configurations. - - Args: - brightness_range (float, optional): A range of brightness. Default: 0.5. - brightness_prob (float, optional): A probability of adjusting brightness. Default: 0.5. - contrast_range (float, optional): A range of contrast. Default: 0.5. - contrast_prob (float, optional): A probability of adjusting contrast. Default: 0.5. - saturation_range (float, optional): A range of saturation. Default: 0.5. - saturation_prob (float, optional): A probability of adjusting saturation. Default: 0.5. - hue_range (int, optional): A range of hue. Default: 18. - hue_prob (float, optional): A probability of adjusting hue. Default: 0.5. 
- """ - - def __init__(self, - brightness_range=0.5, - brightness_prob=0.5, - contrast_range=0.5, - contrast_prob=0.5, - saturation_range=0.5, - saturation_prob=0.5, - hue_range=18, - hue_prob=0.5): - self.brightness_range = brightness_range - self.brightness_prob = brightness_prob - self.contrast_range = contrast_range - self.contrast_prob = contrast_prob - self.saturation_range = saturation_range - self.saturation_prob = saturation_prob - self.hue_range = hue_range - self.hue_prob = hue_prob - - def __call__(self, data): - brightness_lower = 1 - self.brightness_range - brightness_upper = 1 + self.brightness_range - contrast_lower = 1 - self.contrast_range - contrast_upper = 1 + self.contrast_range - saturation_lower = 1 - self.saturation_range - saturation_upper = 1 + self.saturation_range - hue_lower = -self.hue_range - hue_upper = self.hue_range - ops = [ - functional.brightness, functional.contrast, functional.saturation, - functional.hue - ] - random.shuffle(ops) - params_dict = { - 'brightness': { - 'brightness_lower': brightness_lower, - 'brightness_upper': brightness_upper - }, - 'contrast': { - 'contrast_lower': contrast_lower, - 'contrast_upper': contrast_upper - }, - 'saturation': { - 'saturation_lower': saturation_lower, - 'saturation_upper': saturation_upper - }, - 'hue': { - 'hue_lower': hue_lower, - 'hue_upper': hue_upper - } - } - prob_dict = { - 'brightness': self.brightness_prob, - 'contrast': self.contrast_prob, - 'saturation': self.saturation_prob, - 'hue': self.hue_prob - } - - im = data['img'].astype('uint8') - im = Image.fromarray(im) - for id in range(len(ops)): - params = params_dict[ops[id].__name__] - params['im'] = im - prob = prob_dict[ops[id].__name__] - if np.random.uniform(0, 1) < prob: - im = ops[id](**params) - data['img'] = np.asarray(im) - - for key in data.get('gt_fields', []): - if key in ['alpha', 'trimap']: - continue - else: - im = data[key].astype('uint8') - im = Image.fromarray(im) - for id in range(len(ops)): - params = params_dict[ops[id].__name__] - params['im'] = im - prob = prob_dict[ops[id].__name__] - if np.random.uniform(0, 1) < prob: - im = ops[id](**params) - data[key] = np.asarray(im) - return data - - -@manager.TRANSFORMS.add_component -class Padding: - """ - Add bottom-right padding to a raw image or annotation image. - - Args: - target_size (list|tuple): The target size after padding. - im_padding_value (list, optional): The padding value of raw image. - Default: [127.5, 127.5, 127.5]. - label_padding_value (int, optional): The padding value of annotation image. Default: 255. - - Raises: - TypeError: When target_size is neither list nor tuple. - ValueError: When the length of target_size is not 2. - """ - - def __init__(self, target_size, im_padding_value=(127.5, 127.5, 127.5)): - if isinstance(target_size, list) or isinstance(target_size, tuple): - if len(target_size) != 2: - raise ValueError( - '`target_size` should include 2 elements, but it is {}'. - format(target_size)) - else: - raise TypeError( - "Type of target_size is invalid. 
It should be list or tuple, now is {}" - .format(type(target_size))) - - self.target_size = target_size - self.im_padding_value = im_padding_value - - def __call__(self, data): - im_height, im_width = data['img'].shape[0], data['img'].shape[1] - target_height = self.target_size[1] - target_width = self.target_size[0] - pad_height = max(0, target_height - im_height) - pad_width = max(0, target_width - im_width) - data['trans_info'].append(('padding', data['img'].shape[0:2])) - if (pad_height == 0) and (pad_width == 0): - return data - else: - data['img'] = cv2.copyMakeBorder( - data['img'], - 0, - pad_height, - 0, - pad_width, - cv2.BORDER_CONSTANT, - value=self.im_padding_value) - for key in data.get('gt_fields', []): - if key in ['trimap', 'alpha']: - value = 0 - else: - value = self.im_padding_value - data[key] = cv2.copyMakeBorder( - data[key], - 0, - pad_height, - 0, - pad_width, - cv2.BORDER_CONSTANT, - value=value) - return data - - -@manager.TRANSFORMS.add_component -class RandomSharpen: - def __init__(self, prob=0.1): - if prob < 0: - self.prob = 0 - elif prob > 1: - self.prob = 1 - else: - self.prob = prob - - def __call__(self, data): - if np.random.rand() > self.prob: - return data - - radius = np.random.choice([0, 3, 5, 7, 9]) - w = np.random.uniform(0.1, 0.5) - blur_img = cv2.GaussianBlur(data['img'], (radius, radius), 5) - data['img'] = cv2.addWeighted(data['img'], 1 + w, blur_img, -w, 0) - for key in data.get('gt_fields', []): - if key == 'trimap' or key == 'alpha': - continue - blur_img = cv2.GaussianBlur(data[key], (0, 0), 5) - data[key] = cv2.addWeighted(data[key], 1.5, blur_img, -0.5, 0) - - return data - - -@manager.TRANSFORMS.add_component -class RandomNoise: - def __init__(self, prob=0.1): - if prob < 0: - self.prob = 0 - elif prob > 1: - self.prob = 1 - else: - self.prob = prob - - def __call__(self, data): - if np.random.rand() > self.prob: - return data - mean = np.random.uniform(0, 0.04) - var = np.random.uniform(0, 0.001) - noise = np.random.normal(mean, var**0.5, data['img'].shape) * 255 - data['img'] = data['img'] + noise - data['img'] = np.clip(data['img'], 0, 255) - - return data - - -@manager.TRANSFORMS.add_component -class RandomReJpeg: - def __init__(self, prob=0.1): - if prob < 0: - self.prob = 0 - elif prob > 1: - self.prob = 1 - else: - self.prob = prob - - def __call__(self, data): - if np.random.rand() > self.prob: - return data - q = np.random.randint(70, 95) - img = data['img'].astype('uint8') - - # Ensure no conflicts between processes - tmp_name = str(os.getpid()) + '.jpg' - tmp_name = os.path.join(seg_env.TMP_HOME, tmp_name) - cv2.imwrite(tmp_name, img, [int(cv2.IMWRITE_JPEG_QUALITY), q]) - data['img'] = cv2.imread(tmp_name) - - return data diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_cylinder_tower.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_cylinder_tower.py deleted file mode 100644 index 1b79a3b5434b985f50252856644def1ad5edbb25..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_cylinder_tower.py +++ /dev/null @@ -1,53 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class ColorCoordinatedCylinderTower(Task): - """Stack cylinders of four different colors 
(red, blue, green, yellow) on top of each other on a square stand in a specific sequence. The bottom of the stack should start with a blue cylinder, follow by a green cylinder, then a red one, and finally a yellow cylinder at the top. Each cylinder has to be aligned correctly to avoid falling.""" - - def __init__(self): - super().__init__() - self.max_steps = 10 - self.lang_template = "Stack cylinders of four different colors (red, blue, green, yellow) on top of each other on a square stand in a specific sequence. The bottom of the stack should start with a blue cylinder, follow by a green cylinder, then a red one, and finally a yellow cylinder at the top." - self.task_completed_desc = "done stacking cylinders." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add base. - base_size = (0.05, 0.15, 0.005) - base_urdf = 'stacking/stand.urdf' - base_pose = self.get_random_pose(env, base_size) - env.add_object(base_urdf, base_pose, category='fixed') - - # Cylinder colors. - colors = [utils.COLORS['blue'], utils.COLORS['green'], utils.COLORS['red'], utils.COLORS['yellow']] - - # Add cylinders. - cylinder_size = (0.04, 0.04, 0.04) - cylinder_urdf = 'cylinder/cylinder-template.urdf' - - objs = [] - for i in range(4): - cylinder_pose = self.get_random_pose(env, cylinder_size) - cylinder_id = env.add_object(cylinder_urdf, cylinder_pose, color=colors[i]) - objs.append(cylinder_id) - - # Associate placement locations for goals. - place_pos = [(0, 0, 0.03), (0, 0, 0.08), (0, 0, 0.13), (0, 0, 0.18)] - targs = [(utils.apply(base_pose, i), base_pose[1]) for i in place_pos] - - # Goal: cylinders are stacked in a tower (bottom to top: blue, green, red, yellow). - for i in range(4): - self.add_goal(objs=[objs[i]], matches=np.ones((1, 1)), targ_poses=[targs[i]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 4, symmetries=[np.pi/2], - language_goal=self.lang_template) \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/gensim/critic.py b/spaces/Gen-Sim/Gen-Sim/gensim/critic.py deleted file mode 100644 index 8cde0240552141276d6dc71f1e726db6a56948eb..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/gensim/critic.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import os -import IPython - -import traceback -import json -from gensim.utils import ( - save_text, - add_to_txt, - extract_dict, - format_dict_prompt, - generate_feedback, -) -import copy -import random - -class Critic: - """ - class that reflects and criticizes new task for improvement - """ - def __init__(self, cfg, memory): - self.prompt_folder = f"prompts/{cfg['prompt_folder']}" - self.memory = memory - self.chat_log = self.memory.chat_log - self.cfg = cfg - self.model_output_dir = cfg["model_output_dir"] - - def error_review(self, new_task): - """ commonly made error review """ - if os.path.exists(f"{self.prompt_folder}/cliport_prompt_common_errors_template.txt") and "task-name" in new_task: - self.chat_log = add_to_txt(self.chat_log, "================= Error Book Preview!", with_print=True) - errorbook_prompt_text = open(f'{self.prompt_folder}/cliport_prompt_common_errors_template.txt').read() - errorbook_prompt_text = errorbook_prompt_text.replace("TASK_NAME_TEMPLATE", new_task["task-name"]) - res = generate_feedback(errorbook_prompt_text, temperature=0., interaction_txt=self.chat_log) # cfg['gpt_temperature'] - - def reflection(self, new_task, new_code, current_tasks=None): - """ reflect on if the new task needs to be added """ - 
all_add_to_the_task_list_flag = True - - if os.path.exists(f"{self.prompt_folder}/cliport_prompt_task_reflection.txt"): - # only consider successful task - self.chat_log = add_to_txt(self.chat_log, "================= Code Reflect!", with_print=True) - total_tasks = copy.deepcopy(self.memory.online_task_buffer) - if current_tasks is not None: - # adding all the tasks in the current run. at least should not overlap with those - for t in current_tasks: - total_tasks[t['task-name']] = t - - # need to load more - total_tasks = self.memory.online_task_buffer - MAX_NUM = 40 - if len(total_tasks) > MAX_NUM: - total_tasks = dict(random.sample(total_tasks.items(), MAX_NUM)) - - print("reflection history task num:", len(total_tasks)) - task_descriptions_replacement_str = format_dict_prompt(total_tasks, -1) - - # append current new task - code_reflection_prompt_text = open(f"{self.prompt_folder}/cliport_prompt_task_reflection.txt").read() - code_reflection_prompt_text = code_reflection_prompt_text.replace("CURRENT_TASK_NAME_TEMPLATE", str(task_descriptions_replacement_str)) - code_reflection_prompt_text = code_reflection_prompt_text.replace("TASK_STRING_TEMPLATE", str(new_task)) - code_reflection_prompt_text = code_reflection_prompt_text.replace("TASK_CODE_TEMPLATE", str(new_code)) - if len(self.cfg['target_task_name']) > 0: - code_reflection_prompt_text = code_reflection_prompt_text.replace("TARGET_TASK_NAME", self.cfg['target_task_name']) - - # no matter - total_tasks[new_task["task-name"].replace("-", "_")] = str(new_task) - res = generate_feedback(code_reflection_prompt_text, temperature=0.4, interaction_txt=self.chat_log, n=int(self.cfg['reflection_agreement_num'])) # cfg['gpt_temperature'] - all_add_to_the_task_list_flag = True - - for idx, r in enumerate(res): - # iterate through for agreement - reflection_def_cmd = extract_dict(r, prefix='task_reflection') - exec(reflection_def_cmd, globals()) - try: - print(f"critic {idx}:", task_reflection) - - if task_reflection["add_to_the_task_list"] == 'False': - all_add_to_the_task_list_flag = False - print(f"critic {idx} suggests not adding this task to the buffer! ") - except: - IPython.embed() - save_text(self.model_output_dir, new_task['task-name'] + "_reflection_output", str(task_reflection)) - - return all_add_to_the_task_list_flag \ No newline at end of file diff --git a/spaces/GeorgeOrville/bingo/src/components/button-scroll-to-bottom.tsx b/spaces/GeorgeOrville/bingo/src/components/button-scroll-to-bottom.tsx deleted file mode 100644 index b68ab9c0e48320c356e51a52d11b9ca63909e6c5..0000000000000000000000000000000000000000 --- a/spaces/GeorgeOrville/bingo/src/components/button-scroll-to-bottom.tsx +++ /dev/null @@ -1,34 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' -import { useAtBottom } from '@/lib/hooks/use-at-bottom' -import { Button, type ButtonProps } from '@/components/ui/button' -import { IconArrowDown } from '@/components/ui/icons' - -export function ButtonScrollToBottom({ className, ...props }: ButtonProps) { - const isAtBottom = useAtBottom() - - return ( - - ) -} diff --git a/spaces/Gradio-Blocks/anime-colorization/datasets/lsun_bedroom.py b/spaces/Gradio-Blocks/anime-colorization/datasets/lsun_bedroom.py deleted file mode 100644 index 6a5be22eef8c7434331a76ef5ed7332a98a446ef..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/anime-colorization/datasets/lsun_bedroom.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Convert an LSUN lmdb database into a directory of images. 
-""" - -import argparse -import io -import os - -from PIL import Image -import lmdb -import numpy as np - - -def read_images(lmdb_path, image_size): - env = lmdb.open(lmdb_path, map_size=1099511627776, max_readers=100, readonly=True) - with env.begin(write=False) as transaction: - cursor = transaction.cursor() - for _, webp_data in cursor: - img = Image.open(io.BytesIO(webp_data)) - width, height = img.size - scale = image_size / min(width, height) - img = img.resize( - (int(round(scale * width)), int(round(scale * height))), - resample=Image.BOX, - ) - arr = np.array(img) - h, w, _ = arr.shape - h_off = (h - image_size) // 2 - w_off = (w - image_size) // 2 - arr = arr[h_off : h_off + image_size, w_off : w_off + image_size] - yield arr - - -def dump_images(out_dir, images, prefix): - if not os.path.exists(out_dir): - os.mkdir(out_dir) - for i, img in enumerate(images): - Image.fromarray(img).save(os.path.join(out_dir, f"{prefix}_{i:07d}.png")) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--image-size", help="new image size", type=int, default=256) - parser.add_argument("--prefix", help="class name", type=str, default="bedroom") - parser.add_argument("lmdb_path", help="path to an LSUN lmdb database") - parser.add_argument("out_dir", help="path to output directory") - args = parser.parse_args() - - images = read_images(args.lmdb_path, args.image_size) - dump_images(args.out_dir, images, args.prefix) - - -if __name__ == "__main__": - main() diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py deleted file mode 100644 index 61b9751057f10f2173b8e7edde12cca53ebbd2d0..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -model = dict( - bbox_head=dict( - loss_cls=dict( - _delete_=True, - type='GHMC', - bins=30, - momentum=0.75, - use_sigmoid=True, - loss_weight=1.0), - loss_bbox=dict( - _delete_=True, - type='GHMR', - mu=0.02, - bins=10, - momentum=0.7, - loss_weight=10.0))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context_59.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context_59.py deleted file mode 100644 index d2eecf01637b1ef605fdd5c20833cc2e06accbc0..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_40k_pascal_context_59.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context_59.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=59), - test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) -optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/docs/MUSICGEN.md b/spaces/GrandaddyShmax/AudioCraft_Plus/docs/MUSICGEN.md deleted file mode 100644 index 606ce85808a428432f4e77564fb97dcade3851a3..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/docs/MUSICGEN.md +++ /dev/null @@ -1,362 +0,0 @@ -# MusicGen: Simple and Controllable Music 
Generation - -AudioCraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. -MusicGen is a single-stage auto-regressive Transformer model trained over a 32kHz -EnCodec tokenizer with 4 codebooks sampled at 50 Hz. -Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't require -a self-supervised semantic representation, and it generates all 4 codebooks in one pass. By introducing -a small delay between the codebooks, we show we can predict them in parallel, thus having only 50 auto-regressive -steps per second of audio. -Check out our [sample page][musicgen_samples] or test the available demo! - - Open In Colab - - - Open in Hugging Face - -
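As a rough, self-contained sketch of the codebook delay described above (an illustration with assumed toy values, not the actual pattern provider shipped with AudioCraft, which is configured via `codebooks_pattern.delay.delays` in the training section below): codebook *k* at frame *t* is simply predicted at autoregressive step *t + k*, so all four codebooks advance almost in parallel.

```python
# Illustrative delay interleaving for K=4 codebooks at 50 Hz (assumed toy values).
import numpy as np

T, K = 6, 4                      # 6 frames, 4 codebooks
delays = [0, 1, 2, 3]            # one extra step of delay per codebook
PAD = -1                         # steps where a codebook predicts nothing

steps = np.full((K, T + max(delays)), PAD, dtype=int)
for k, d in enumerate(delays):
    steps[k, d:d + T] = np.arange(T)   # frame index predicted at each step

print(steps)
# Row k lists, per step, which frame of codebook k is predicted (-1 = padding).
# Total steps = T + max(delays), i.e. roughly 50 autoregressive steps per second.
```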
- -We use 20K hours of licensed music to train MusicGen. Specifically, we rely on an internal dataset -of 10K high-quality music tracks, and on the ShutterStock and Pond5 music data. - - -## Model Card - -See [the model card](../model_cards/MUSICGEN_MODEL_CARD.md). - - -## Installation - -Please follow the AudioCraft installation instructions from the [README](../README.md). - -AudioCraft requires a GPU with at least 16 GB of memory for running inference with the medium-sized models (~1.5B parameters). - -## Usage - -We offer a number of way to interact with MusicGen: -1. A demo is also available on the [`facebook/MusicGen` Hugging Face Space](https://huggingface.co/spaces/facebook/MusicGen) -(huge thanks to all the HF team for their support). -2. You can run the extended demo on a Colab: -[colab notebook](https://colab.research.google.com/drive/1JlTOjB-G0A2Hz3h8PK63vLZk4xdCI5QB?usp=sharing) -3. You can use the gradio demo locally by running [`python -m demos.musicgen_app --share`](../demos/musicgen_app.py). -4. You can play with MusicGen by running the jupyter notebook at [`demos/musicgen_demo.ipynb`](../demos/musicgen_demo.ipynb) locally (if you have a GPU). -5. Finally, checkout [@camenduru Colab page](https://github.com/camenduru/MusicGen-colab) -which is regularly updated with contributions from @camenduru and the community. - - -## API - -We provide a simple API and 4 pre-trained models. The pre trained models are: -- `facebook/musicgen-small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small) -- `facebook/musicgen-medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium) -- `facebook/musicgen-melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody) -- `facebook/musicgen-large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large) - -We observe the best trade-off between quality and compute with the `facebook/musicgen-medium` or `facebook/musicgen-melody` model. -In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller -GPUs will be able to generate short sequences, or longer sequences with the `facebook/musicgen-small` model. - -See after a quick example for using the API. - -```python -import torchaudio -from audiocraft.models import MusicGen -from audiocraft.data.audio import audio_write - -model = MusicGen.get_pretrained('facebook/musicgen-melody') -model.set_generation_params(duration=8) # generate 8 seconds. -wav = model.generate_unconditional(4) # generates 4 unconditional audio samples -descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] -wav = model.generate(descriptions) # generates 3 samples. - -melody, sr = torchaudio.load('./assets/bach.mp3') -# generates using the melody from the given audio and the provided descriptions. -wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr) - -for idx, one_wav in enumerate(wav): - # Will save under {idx}.wav, with loudness normalization at -14 db LUFS. - audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True) -``` - -## 🤗 Transformers Usage - -MusicGen is available in the 🤗 Transformers library from version 4.31.0 onwards, requiring minimal dependencies -and additional packages. Steps to get started: - -1. 
First install the 🤗 [Transformers library](https://github.com/huggingface/transformers) from main: - -```shell -pip install git+https://github.com/huggingface/transformers.git -``` - -2. Run the following Python code to generate text-conditional audio samples: - -```py -from transformers import AutoProcessor, MusicgenForConditionalGeneration - - -processor = AutoProcessor.from_pretrained("facebook/musicgen-small") -model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") - -inputs = processor( - text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], - padding=True, - return_tensors="pt", -) - -audio_values = model.generate(**inputs, max_new_tokens=256) -``` - -3. Listen to the audio samples either in an ipynb notebook: - -```py -from IPython.display import Audio - -sampling_rate = model.config.audio_encoder.sampling_rate -Audio(audio_values[0].numpy(), rate=sampling_rate) -``` - -Or save them as a `.wav` file using a third-party library, e.g. `scipy`: - -```py -import scipy - -sampling_rate = model.config.audio_encoder.sampling_rate -scipy.io.wavfile.write("musicgen_out.wav", rate=sampling_rate, data=audio_values[0, 0].numpy()) -``` - -For more details on using the MusicGen model for inference using the 🤗 Transformers library, refer to the -[MusicGen docs](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen) or the hands-on -[Google Colab](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/MusicGen.ipynb). - - -## Training - -The [MusicGenSolver](../audiocraft/solvers/musicgen.py) implements MusicGen's training pipeline. -It defines an autoregressive language modeling task over multiple streams of discrete tokens -extracted from a pre-trained EnCodec model (see [EnCodec documentation](./ENCODEC.md) -for more details on how to train such model). - -Note that **we do NOT provide any of the datasets** used for training MusicGen. -We provide a dummy dataset containing just a few examples for illustrative purposes. - -Please read first the [TRAINING documentation](./TRAINING.md), in particular the Environment Setup section. - -### Example configurations and grids - -We provide configurations to reproduce the released models and our research. -MusicGen solvers configuration are available in [config/solver/musicgen](../config/solver/musicgen), -in particular: -* MusicGen base model for text-to-music: -[`solver=musicgen/musicgen_base_32khz`](../config/solver/musicgen/musicgen_base_32khz.yaml) -* MusicGen model with chromagram-conditioning support: -[`solver=musicgen/musicgen_melody_32khz`](../config/solver/musicgen/musicgen_melody_32khz.yaml) - -We provide 3 different scales, e.g. `model/lm/model_scale=small` (300M), or `medium` (1.5B), and `large` (3.3B). - -Please find some example grids to train MusicGen at -[audiocraft/grids/musicgen](../audiocraft/grids/musicgen/). - -```shell -# text-to-music -dora grid musicgen.musicgen_base_32khz --dry_run --init -# melody-guided music generation -dora grid musicgen.musicgen_melody_base_32khz --dry_run --init -# Remove the `--dry_run --init` flags to actually schedule the jobs once everything is setup. -``` - -### Music dataset and metadata - -MusicGen's underlying dataset is an AudioDataset augmented with music-specific metadata. -The MusicGen dataset implementation expects the metadata to be available as `.json` files -at the same location as the audio files. Learn more in the [datasets section](./DATASETS.md). 
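For illustration only, a track `track_0001.mp3` could ship with a `track_0001.json` sidecar such as the sketch below; the field names are an assumption on my part, so defer to the linked DATASETS.md for the exact schema the MusicGen dataset implementation reads.

```python
# Write a hypothetical metadata sidecar next to its audio file (illustrative keys).
import json
from pathlib import Path

audio_path = Path("dataset/train/track_0001.mp3")  # assumed dataset layout
meta = {
    "title": "Calm piano loop",
    "artist": "unknown",
    "description": "calm solo piano, slow tempo, warm reverb",
    "keywords": "piano, calm, ambient",
    "genre": "ambient",
    "bpm": 70,
    "moods": ["calm", "dreamy"],
}

audio_path.parent.mkdir(parents=True, exist_ok=True)
# The .json sits at the same location as the audio, as the loader expects.
audio_path.with_suffix(".json").write_text(json.dumps(meta, indent=2))
```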
- - -### Audio tokenizers - -We support a number of audio tokenizers: either pretrained EnCodec models, [DAC](https://github.com/descriptinc/descript-audio-codec), or your own models. -The tokenizer is controlled with the setting `compression_model_checkpoint`. -For instance, - -```bash -# Using the 32kHz EnCodec trained on music -dora run solver=musicgen/debug \ - compression_model_checkpoint=//pretrained/facebook/encodec_32khz \ - transformer_lm.n_q=4 transformer_lm.card=2048 - -# Using DAC -dora run solver=musicgen/debug \ - compression_model_checkpoint=//pretrained/dac_44khz \ - transformer_lm.n_q=9 transformer_lm.card=1024 \ - 'codebooks_pattern.delay.delays=[0,1,2,3,4,5,6,7,8]' - -# Using your own model after export (see ENCODEC.md) -dora run solver=musicgen/debug \ - compression_model_checkpoint=//pretrained//checkpoints/my_audio_lm/compression_state_dict.bin \ - transformer_lm.n_q=... transformer_lm.card=... - -# Using your own model from its training checkpoint. -dora run solver=musicgen/debug \ - compression_model_checkpoint=//sig/SIG \ # where SIG is the Dora signature of the EnCodec XP. - transformer_lm.n_q=... transformer_lm.card=... -``` - -**Warning:** you are responsible for setting the proper value for `transformer_lm.n_q` and `transformer_lm.card` (cardinality of the codebooks). You also have to update the codebook_pattern to match `n_q` as shown in the example for using DAC. . - - -### Fine tuning existing models - -You can initialize your model to one of the pretrained models by using the `continue_from` argument, in particular - -```bash -# Using pretrained MusicGen model. -dora run solver=musicgen/musicgen_base_32khz model/lm/model_scale=medium continue_from=//pretrained/facebook/musicgen-medium conditioner=text2music - -# Using another model you already trained with a Dora signature SIG. -dora run solver=musicgen/musicgen_base_32khz model/lm/model_scale=medium continue_from=//sig/SIG conditioner=text2music - -# Or providing manually a path -dora run solver=musicgen/musicgen_base_32khz model/lm/model_scale=medium continue_from=/checkpoints/my_other_xp/checkpoint.th -``` - -**Warning:** You are responsible for selecting the other parameters accordingly, in a way that make it compatible - with the model you are fine tuning. Configuration is NOT automatically inherited from the model you continue from. In particular make sure to select the proper `conditioner` and `model/lm/model_scale`. - -**Warning:** We currently do not support fine tuning a model with slightly different layers. If you decide - to change some parts, like the conditioning or some other parts of the model, you are responsible for manually crafting a checkpoint file from which we can safely run `load_state_dict`. - If you decide to do so, make sure your checkpoint is saved with `torch.save` and contains a dict - `{'best_state': {'model': model_state_dict_here}}`. Directly give the path to `continue_from` without a `//pretrained/` prefix. - -### Caching of EnCodec tokens - -It is possible to precompute the EnCodec tokens and other metadata. -An example of generating and using this cache provided in the [musicgen.musicgen_base_cached_32khz grid](../audiocraft/grids/musicgen/musicgen_base_cached_32khz.py). - -### Evaluation stage - -By default, evaluation stage is also computing the cross-entropy and the perplexity over the -evaluation dataset. Indeed the objective metrics used for evaluation can be costly to run -or require some extra dependencies. 
Please refer to the [metrics documentation](./METRICS.md) -for more details on the requirements for each metric. - -We provide an off-the-shelf configuration to enable running the objective metrics -for audio generation in -[config/solver/musicgen/evaluation/objective_eval](../config/solver/musicgen/evaluation/objective_eval.yaml). - -One can then activate evaluation the following way: -```shell -# using the configuration -dora run solver=musicgen/debug solver/musicgen/evaluation=objective_eval -# specifying each of the fields, e.g. to activate KL computation -dora run solver=musicgen/debug evaluate.metrics.kld=true -``` - -See [an example evaluation grid](../audiocraft/grids/musicgen/musicgen_pretrained_32khz_eval.py). - -### Generation stage - -The generation stage allows to generate samples conditionally and/or unconditionally and to perform -audio continuation (from a prompt). We currently support greedy sampling (argmax), sampling -from softmax with a given temperature, top-K and top-P (nucleus) sampling. The number of samples -generated and the batch size used are controlled by the `dataset.generate` configuration -while the other generation parameters are defined in `generate.lm`. - -```shell -# control sampling parameters -dora run solver=musicgen/debug generate.lm.gen_duration=10 generate.lm.use_sampling=true generate.lm.top_k=15 -``` - -#### Listening to samples - -Note that generation happens automatically every 25 epochs. You can easily access and -compare samples between models (as long as they are trained) on the same dataset using the -MOS tool. For that first `pip install Flask gunicorn`. Then -``` -gunicorn -w 4 -b 127.0.0.1:8895 -t 120 'scripts.mos:app' --access-logfile - -``` -And access the tool at [https://127.0.0.1:8895](https://127.0.0.1:8895). - -### Playing with the model - -Once you have launched some experiments, you can easily get access -to the Solver with the latest trained model using the following snippet. - -```python -from audiocraft.solvers.musicgen import MusicGen - -solver = MusicGen.get_eval_solver_from_sig('SIG', device='cpu', batch_size=8) -solver.model -solver.dataloaders -``` - -### Importing / Exporting models - -We do not support currently loading a model from the Hugging Face implementation or exporting to it. -If you want to export your model in a way that is compatible with `audiocraft.models.MusicGen` -API, you can run: - -```python -from audiocraft.utils import export -from audiocraft import train -xp = train.main.get_xp_from_sig('SIG_OF_LM') -export.export_lm(xp.folder / 'checkpoint.th', '/checkpoints/my_audio_lm/state_dict.bin') -# You also need to bundle the EnCodec model you used !! -## Case 1) you trained your own -xp_encodec = train.main.get_xp_from_sig('SIG_OF_ENCODEC') -export.export_encodec(xp_encodec.folder / 'checkpoint.th', '/checkpoints/my_audio_lm/compression_state_dict.bin') -## Case 2) you used a pretrained model. Give the name you used without the //pretrained/ prefix. -## This will actually not dump the actual model, simply a pointer to the right model to download. -export.export_pretrained_compression_model('facebook/encodec_32khz', '/checkpoints/my_audio_lm/compression_state_dict.bin') -``` - -Now you can load your custom model with: -```python -import audiocraft.models -musicgen = audiocraft.models.MusicGen.get_pretrained('/checkpoints/my_audio_lm/') -``` - - -### Learn more - -Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md). 
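
To close the loop on the export workflow above: the resulting checkpoint folder can be used through the same inference API shown at the top of this document. Below is a minimal sketch, reusing the placeholder path from the export example; only calls already shown in this document are used.

```python
import audiocraft.models
from audiocraft.data.audio import audio_write

# Placeholder path: the folder produced by the export snippet above
# (state_dict.bin + compression_state_dict.bin).
musicgen = audiocraft.models.MusicGen.get_pretrained('/checkpoints/my_audio_lm/')
musicgen.set_generation_params(duration=8)  # 8-second samples, as in the API example
wav = musicgen.generate(['energetic EDM'])  # one sample per description
for idx, one_wav in enumerate(wav):
    # Saves custom_{idx}.wav with loudness normalization at -14 dB LUFS.
    audio_write(f'custom_{idx}', one_wav.cpu(), musicgen.sample_rate,
                strategy="loudness", loudness_compressor=True)
```

Any of the `facebook/musicgen-*` names from the API section can be swapped in the same way.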
- -## FAQ - -#### I need help on Windows - -@FurkanGozukara made a complete tutorial for [AudioCraft/MusicGen on Windows](https://youtu.be/v-YpvPkhdO4) - -#### I need help for running the demo on Colab - -Check [@camenduru tutorial on YouTube](https://www.youtube.com/watch?v=EGfxuTy9Eeo). - -#### What are top-k, top-p, temperature and classifier-free guidance? - -Check out [@FurkanGozukara tutorial](https://github.com/FurkanGozukara/Stable-Diffusion/blob/main/Tutorials/AI-Music-Generation-Audiocraft-Tutorial.md#more-info-about-top-k-top-p-temperature-and-classifier-free-guidance-from-chatgpt). - -#### Should I use FSDP or autocast ? - -The two are mutually exclusive (because FSDP does autocast on its own). -You can use autocast up to 1.5B (medium), if you have enough RAM on your GPU. -FSDP makes everything more complex but will free up some memory for the actual -activations by sharding the optimizer state. - -## Citation -``` -@article{copet2023simple, - title={Simple and Controllable Music Generation}, - author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez}, - year={2023}, - journal={arXiv preprint arXiv:2306.05284}, -} -``` - - -## License - -See license information in the [model card](../model_cards/MUSICGEN_MODEL_CARD.md). - - -[arxiv]: https://arxiv.org/abs/2306.05284 -[musicgen_samples]: https://ai.honu.io/papers/musicgen/ diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/MODEL_CARD.md b/spaces/GrandaddyShmax/MusicGen_Plus/MODEL_CARD.md deleted file mode 100644 index 6c2c9f883969eb905e74ad3376966d156cc5ca00..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus/MODEL_CARD.md +++ /dev/null @@ -1,81 +0,0 @@ -# MusicGen Model Card - -## Model details - -**Organization developing the model:** The FAIR team of Meta AI. - -**Model date:** MusicGen was trained between April 2023 and May 2023. - -**Model version:** This is the version 1 of the model. - -**Model type:** MusicGen consists of an EnCodec model for audio tokenization, an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters ; and two variants: a model trained for text-to-music generation task and a model trained for melody-guided music generation. - -**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv]. - -**Citation details** See [our paper][arxiv] - -**License** Code is released under MIT, model weights are released under CC-BY-NC 4.0. - -**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue. - -## Intended use -**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including: - -- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science -- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs - -**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateur seeking to better understand those models. 
- -**Out-of-scope use cases** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. - -## Metrics - -**Models performance measures:** We used the following objective measure to evaluate the model on a standard music benchmark: - -- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish) -- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST) -- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model - -Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes: - -- Overall quality of the music samples; -- Text relevance to the provided text input; -- Adherence to the melody for melody-guided music generation. - -More details on performance measures and human studies can be found in the paper. - -**Decision thresholds:** Not applicable. - -## Evaluation datasets - -The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set. - -## Training datasets - -The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing. - -## Quantitative analysis - -More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Experimental Setup section. - -## Limitations and biases - -**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data, we believe that scaling the model on larger datasets can further improve the performance of the model. - -**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs). - -**Limitations:** - -- The model is not able to generate realistic vocals. -- The model has been trained with English descriptions and will not perform as well in other languages. -- The model does not perform equally well for all music styles and cultures. -- The model sometimes generates end of songs, collapsing to silence. -- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results. - -**Biases:** The source of data is potentially lacking diversity and all music cultures are not equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. 
Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive. - -**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data. - -**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks. - -[arxiv]: https://arxiv.org/abs/2306.05284 diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/modules/test_lstm.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/visualizer.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/visualizer.py deleted file mode 100644 index 9d7b05c651d87ce14f3f5bb982c9d6f4dbee5868..0000000000000000000000000000000000000000 --- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/visualizer.py +++ /dev/null @@ -1,487 +0,0 @@ -import colorsys -from enum import Enum, unique -from typing import Union - -import cv2 -import matplotlib as mpl -import matplotlib.colors as mplc -import matplotlib.figure as mplfigure -import numpy as np -import pycocotools.mask as mask_util -from matplotlib.backends.backend_agg import FigureCanvasAgg -from PIL import Image - -_LARGE_MASK_AREA_THRESH = 120000 -_COLORS = np.array( - [ - 0.000, 0.447, 0.741, - 0.850, 0.325, 0.098, - 0.929, 0.694, 0.125, - 0.494, 0.184, 0.556, - 0.466, 0.674, 0.188, - 0.301, 0.745, 0.933, - 0.635, 0.078, 0.184, - 0.300, 0.300, 0.300, - 0.600, 0.600, 0.600, - 1.000, 0.000, 0.000, - 1.000, 0.500, 0.000, - 0.749, 0.749, 0.000, - 0.000, 1.000, 0.000, - 0.000, 0.000, 1.000, - 0.667, 0.000, 1.000, - 0.333, 0.333, 0.000, - 0.333, 0.667, 0.000, - 0.333, 1.000, 0.000, - 0.667, 0.333, 0.000, - 0.667, 0.667, 0.000, - 0.667, 1.000, 0.000, - 1.000, 0.333, 0.000, - 1.000, 0.667, 0.000, - 1.000, 1.000, 0.000, - 0.000, 0.333, 0.500, - 0.000, 0.667, 0.500, - 0.000, 1.000, 0.500, - 0.333, 0.000, 0.500, - 0.333, 0.333, 0.500, - 0.333, 0.667, 0.500, - 0.333, 1.000, 0.500, - 0.667, 0.000, 0.500, - 0.667, 0.333, 0.500, - 0.667, 0.667, 0.500, - 0.667, 1.000, 0.500, - 1.000, 0.000, 0.500, - 1.000, 
0.333, 0.500, - 1.000, 0.667, 0.500, - 1.000, 1.000, 0.500, - 0.000, 0.333, 1.000, - 0.000, 0.667, 1.000, - 0.000, 1.000, 1.000, - 0.333, 0.000, 1.000, - 0.333, 0.333, 1.000, - 0.333, 0.667, 1.000, - 0.333, 1.000, 1.000, - 0.667, 0.000, 1.000, - 0.667, 0.333, 1.000, - 0.667, 0.667, 1.000, - 0.667, 1.000, 1.000, - 1.000, 0.000, 1.000, - 1.000, 0.333, 1.000, - 1.000, 0.667, 1.000, - 0.333, 0.000, 0.000, - 0.500, 0.000, 0.000, - 0.667, 0.000, 0.000, - 0.833, 0.000, 0.000, - 1.000, 0.000, 0.000, - 0.000, 0.167, 0.000, - 0.000, 0.333, 0.000, - 0.000, 0.500, 0.000, - 0.000, 0.667, 0.000, - 0.000, 0.833, 0.000, - 0.000, 1.000, 0.000, - 0.000, 0.000, 0.167, - 0.000, 0.000, 0.333, - 0.000, 0.000, 0.500, - 0.000, 0.000, 0.667, - 0.000, 0.000, 0.833, - 0.000, 0.000, 1.000, - 0.000, 0.000, 0.000, - 0.143, 0.143, 0.143, - 0.857, 0.857, 0.857, - 1.000, 1.000, 1.000 - ] -).astype(np.float32).reshape(-1, 3) - -def random_color(rgb=False, maximum=255): - """ - Args: - rgb (bool): whether to return RGB colors or BGR colors. - maximum (int): either 255 or 1 - - Returns: - ndarray: a vector of 3 numbers - """ - idx = np.random.randint(0, len(_COLORS)) - ret = _COLORS[idx] * maximum - if not rgb: - ret = ret[::-1] - return ret - -@unique -class ColorMode(Enum): - """ - Enum of different color modes to use for instance visualizations. - """ - - IMAGE = 0 - """ - Picks a random color for every instance and overlay segmentations with low opacity. - """ - SEGMENTATION = 1 - """ - Let instances of the same category have similar colors - (from metadata.thing_colors), and overlay them with - high opacity. This provides more attention on the quality of segmentation. - """ - IMAGE_BW = 2 - """ - Same as IMAGE, but convert all areas without masks to gray-scale. - Only available for drawing per-instance mask predictions. - """ - -class VisImage: - def __init__(self, img, scale=1.0): - """ - Args: - img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. - scale (float): scale the input image - """ - self.img = img - self.scale = scale - self.width, self.height = img.shape[1], img.shape[0] - self._setup_figure(img) - - def _setup_figure(self, img): - """ - Args: - Same as in :meth:`__init__()`. - - Returns: - fig (matplotlib.pyplot.figure): top level container for all the image plot elements. - ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. - """ - fig = mplfigure.Figure(frameon=False) - self.dpi = fig.get_dpi() - # add a small 1e-2 to avoid precision lost due to matplotlib's truncation - # (https://github.com/matplotlib/matplotlib/issues/15363) - fig.set_size_inches( - (self.width * self.scale + 1e-2) / self.dpi, - (self.height * self.scale + 1e-2) / self.dpi, - ) - self.canvas = FigureCanvasAgg(fig) - # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) - ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) - ax.axis("off") - self.fig = fig - self.ax = ax - self.reset_image(img) - - def reset_image(self, img): - """ - Args: - img: same as in __init__ - """ - img = img.astype("uint8") - self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") - - def save(self, filepath): - """ - Args: - filepath (str): a string that contains the absolute path, including the file name, where - the visualized image will be saved. - """ - self.fig.savefig(filepath) - - def get_image(self): - """ - Returns: - ndarray: - the visualized image of shape (H, W, 3) (RGB) in uint8 type. - The shape is scaled w.r.t the input image using the given `scale` argument. 
- """ - canvas = self.canvas - s, (width, height) = canvas.print_to_buffer() - # buf = io.BytesIO() # works for cairo backend - # canvas.print_rgba(buf) - # width, height = self.width, self.height - # s = buf.getvalue() - - buffer = np.frombuffer(s, dtype="uint8") - - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - return rgb.astype("uint8") - -class GenericMask: - """ - Attribute: - polygons (list[ndarray]): list[ndarray]: polygons for this mask. - Each ndarray has format [x, y, x, y, ...] - mask (ndarray): a binary mask - """ - - def __init__(self, mask_or_polygons, height, width): - self._mask = self._polygons = self._has_holes = None - self.height = height - self.width = width - - m = mask_or_polygons - if isinstance(m, dict): - # RLEs - assert "counts" in m and "size" in m - if isinstance(m["counts"], list): # uncompressed RLEs - h, w = m["size"] - assert h == height and w == width - m = mask_util.frPyObjects(m, h, w) - self._mask = mask_util.decode(m)[:, :] - return - - if isinstance(m, list): # list[ndarray] - self._polygons = [np.asarray(x).reshape(-1) for x in m] - return - - if isinstance(m, np.ndarray): # assumed to be a binary mask - assert m.shape[1] != 2, m.shape - assert m.shape == ( - height, - width, - ), f"mask shape: {m.shape}, target dims: {height}, {width}" - self._mask = m.astype("uint8") - return - - raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) - - @property - def mask(self): - if self._mask is None: - self._mask = self.polygons_to_mask(self._polygons) - return self._mask - - @property - def polygons(self): - if self._polygons is None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - return self._polygons - - @property - def has_holes(self): - if self._has_holes is None: - if self._mask is not None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - else: - self._has_holes = False # if original format is polygon, does not have holes - return self._has_holes - - def mask_to_polygons(self, mask): - # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level - # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. - # Internal contours (holes) are placed in hierarchy-2. - # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. - mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr - res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) - hierarchy = res[-1] - if hierarchy is None: # empty mask - return [], False - has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 - res = res[-2] - res = [x.flatten() for x in res] - # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. - # We add 0.5 to turn them into real-value coordinate space. A better solution - # would be to first +0.5 and then dilate the returned polygon by 0.5. 
- res = [x + 0.5 for x in res if len(x) >= 6] - return res, has_holes - - def polygons_to_mask(self, polygons): - rle = mask_util.frPyObjects(polygons, self.height, self.width) - rle = mask_util.merge(rle) - return mask_util.decode(rle)[:, :] - - def area(self): - return self.mask.sum() - - def bbox(self): - p = mask_util.frPyObjects(self.polygons, self.height, self.width) - p = mask_util.merge(p) - bbox = mask_util.toBbox(p) - bbox[2] += bbox[0] - bbox[3] += bbox[1] - return bbox - - -class Visualizer: - """ - Visualizer that draws data about detection/segmentation on images. - - It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` - that draw primitive objects to images, as well as high-level wrappers like - `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` - that draw composite data in some pre-defined style. - - Note that the exact visualization style for the high-level wrappers are subject to change. - Style such as color, opacity, label contents, visibility of labels, or even the visibility - of objects themselves (e.g. when the object is too small) may change according - to different heuristics, as long as the results still look visually reasonable. - - To obtain a consistent style, you can implement custom drawing functions with the - abovementioned primitive methods instead. If you need more customized visualization - styles, you can process the data yourself following their format documented in - tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not - intend to satisfy everyone's preference on drawing styles. - - This visualizer focuses on high rendering quality rather than performance. It is not - designed to be used for real-time applications. - """ - - # TODO implement a fast, rasterized version using OpenCV - - def __init__(self, img_rgb: Union[Image.Image, np.ndarray], scale=1.0, instance_mode=ColorMode.IMAGE): - """ - Args: - img_rgb: a numpy array of shape (H, W, C), where H and W correspond to - the height and width of the image respectively. C is the number of - color channels. The image is required to be in RGB format since that - is a requirement of the Matplotlib library. The image is also expected - to be in the range [0, 255]. - instance_mode (ColorMode): defines one of the pre-defined style for drawing - instances on an image. - """ - if type(img_rgb) == np.ndarray: - img_rgb = img_rgb[:, :, ::-1] - else: - img_rgb = np.array(img_rgb)[:, :, ::-1] - self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) - self.output = VisImage(self.img, scale=scale) - - # too small texts are useless, therefore clamp to 9 - self._default_font_size = max( - np.sqrt(self.output.height * self.output.width) // 90, 10 // scale - ) - self._instance_mode = instance_mode - - def draw_binary_mask( - self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 - ): - """ - Args: - binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and - W is the image width. Each value in the array is either a 0 or 1 value of uint8 - type. - color: color of the mask. Refer to `matplotlib.colors` for a full list of - formats that are accepted. If None, will pick a random color. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. - text (str): if None, will be drawn on the object - alpha (float): blending efficient. Smaller values lead to more transparent masks. 
- area_threshold (float): a connected component smaller than this area will not be shown. - - Returns: - output (VisImage): image object with mask drawn. - """ - if color is None: - color = random_color(rgb=True, maximum=1) - color = mplc.to_rgb(color) - - has_valid_segment = False - binary_mask = binary_mask.astype("uint8") # opencv needs uint8 - mask = GenericMask(binary_mask, self.output.height, self.output.width) - shape2d = (binary_mask.shape[0], binary_mask.shape[1]) - - if not mask.has_holes: - # draw polygons for regular masks - for segment in mask.polygons: - area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) - if area < (area_threshold or 0): - continue - has_valid_segment = True - segment = segment.reshape(-1, 2) - self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) - else: - # TODO: Use Path/PathPatch to draw vector graphics: - # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon - rgba = np.zeros(shape2d + (4,), dtype="float32") - rgba[:, :, :3] = color - rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha - has_valid_segment = True - self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) - - if text is not None and has_valid_segment: - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - self._draw_text_in_mask(binary_mask, text, lighter_color) - return self.output - - def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): - """ - Args: - segment: numpy array of shape Nx2, containing all the points in the polygon. - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. If not provided, a darker shade - of the polygon color will be used instead. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - - Returns: - output (VisImage): image object with polygon drawn. - """ - if edge_color is None: - # make edge color darker than the polygon color - if alpha > 0.8: - edge_color = self._change_color_brightness(color, brightness_factor=-0.7) - else: - edge_color = color - edge_color = mplc.to_rgb(edge_color) + (1,) - - polygon = mpl.patches.Polygon( - segment, - fill=True, - facecolor=mplc.to_rgb(color) + (alpha,), - edgecolor=edge_color, - linewidth=max(self._default_font_size // 15 * self.output.scale, 1), - ) - self.output.ax.add_patch(polygon) - return self.output - - - """ - Internal methods: - """ - - def _change_color_brightness(self, color, brightness_factor): - """ - Depending on the brightness_factor, gives a lighter or darker color i.e. a color with - less or more saturation than the original color. - - Args: - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of - 0 will correspond to no change, a factor in [-1.0, 0) range will result in - a darker color and a factor in (0, 1.0] range will result in a lighter color. - - Returns: - modified_color (tuple[double]): a tuple containing the RGB values of the - modified color. Each value in the tuple is in the [0.0, 1.0] range. 
- """ - assert brightness_factor >= -1.0 and brightness_factor <= 1.0 - color = mplc.to_rgb(color) - polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) - modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) - modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness - modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness - modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) - return modified_color - - def _draw_text_in_mask(self, binary_mask, text, color): - """ - Find proper places to draw text given a binary mask. - """ - # TODO sometimes drawn on wrong objects. the heuristics here can improve. - _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) - if stats[1:, -1].size == 0: - return - largest_component_id = np.argmax(stats[1:, -1]) + 1 - - # draw text on the largest component, as well as other very large components. - for cid in range(1, _num_cc): - if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: - # median is more stable than centroid - # center = centroids[largest_component_id] - center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] - self.draw_text(text, center, color=color) - - def get_output(self): - """ - Returns: - output (VisImage): the image output containing the visualizations added - to the image. - """ - return self.output diff --git a/spaces/HaMerL/ChaosinChat/modules/models/configuration_moss.py b/spaces/HaMerL/ChaosinChat/modules/models/configuration_moss.py deleted file mode 100644 index 9bad4396ecea6578c1628732d0ef077d8964d45d..0000000000000000000000000000000000000000 --- a/spaces/HaMerL/ChaosinChat/modules/models/configuration_moss.py +++ /dev/null @@ -1,118 +0,0 @@ -""" Moss model configuration""" - -from transformers.utils import logging -from transformers.configuration_utils import PretrainedConfig - - -logger = logging.get_logger(__name__) - - -class MossConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`MossModel`]. It is used to instantiate a - Moss model according to the specified arguments, defining the model architecture. Instantiating a configuration - with the defaults will yield a similar configuration to that of the Moss - [fnlp/moss-moon-003-base](https://huggingface.co/fnlp/moss-moon-003-base) architecture. Configuration objects - inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from - [`PretrainedConfig`] for more information. - - Args: - vocab_size (`int`, *optional*, defaults to 107008): - Vocabulary size of the Moss model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`MossModel`]. - n_positions (`int`, *optional*, defaults to 2048): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - n_embd (`int`, *optional*, defaults to 4096): - Dimensionality of the embeddings and hidden states. - n_layer (`int`, *optional*, defaults to 28): - Number of hidden layers in the Transformer encoder. - n_head (`int`, *optional*, defaults to 16): - Number of attention heads for each attention layer in the Transformer encoder. - rotary_dim (`int`, *optional*, defaults to 64): - Number of dimensions in the embedding that Rotary Position Embedding is applied to. 
- n_inner (`int`, *optional*, defaults to None): - Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd - activation_function (`str`, *optional*, defaults to `"gelu_new"`): - Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. - resid_pdrop (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - embd_pdrop (`int`, *optional*, defaults to 0.1): - The dropout ratio for the embeddings. - attn_pdrop (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention. - layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): - The epsilon to use in the layer normalization layers. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models). - - Example: - - ```python - >>> from modeling_moss import MossModel - >>> from configuration_moss import MossConfig - - >>> # Initializing a moss-moon-003-base configuration - >>> configuration = MossConfig() - - >>> # Initializing a model (with random weights) from the configuration - >>> model = MossModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - - model_type = "moss" - attribute_map = { - "max_position_embeddings": "n_positions", - "hidden_size": "n_embd", - "num_attention_heads": "n_head", - "num_hidden_layers": "n_layer", - } - - def __init__( - self, - vocab_size=107008, - n_positions=2048, - n_ctx=2048, - n_embd=4096, - n_layer=28, - n_head=16, - rotary_dim=64, - n_inner=None, - activation_function="gelu_new", - resid_pdrop=0.0, - embd_pdrop=0.0, - attn_pdrop=0.0, - layer_norm_epsilon=1e-5, - initializer_range=0.02, - use_cache=True, - bos_token_id=106028, - eos_token_id=106068, - tie_word_embeddings=False, - **kwargs, - ): - self.vocab_size = vocab_size - self.n_ctx = n_ctx - self.n_positions = n_positions - self.n_embd = n_embd - self.n_layer = n_layer - self.n_head = n_head - self.n_inner = n_inner - self.rotary_dim = rotary_dim - self.activation_function = activation_function - self.resid_pdrop = resid_pdrop - self.embd_pdrop = embd_pdrop - self.attn_pdrop = attn_pdrop - self.layer_norm_epsilon = layer_norm_epsilon - self.initializer_range = initializer_range - self.use_cache = use_cache - - self.bos_token_id = bos_token_id - self.eos_token_id = eos_token_id - - super().__init__( - bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs - ) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py deleted file mode 100644 index f83471409a434556cab70086ca9e2d72d4bdddd5..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import sys - - -def main(): - for line in sys.stdin: - print(" ".join(list(line.strip().replace(" ", "|"))) + " |") - - -if __name__ == "__main__": - main() diff --git a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/learn_joint_bpe_and_vocab.py b/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/learn_joint_bpe_and_vocab.py deleted file mode 100644 index d2bd754ce36dc782c554f382cc847906703bbb81..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/learn_joint_bpe_and_vocab.py +++ /dev/null @@ -1 +0,0 @@ -subword_nmt/learn_joint_bpe_and_vocab.py \ No newline at end of file diff --git a/spaces/Himanshi/Face-Cartoonify-for-Video-Call-Privacy/README.md b/spaces/Himanshi/Face-Cartoonify-for-Video-Call-Privacy/README.md deleted file mode 100644 index 4ddbae927d0561ef1ba15bd19eb01d5a28a501ec..0000000000000000000000000000000000000000 --- a/spaces/Himanshi/Face-Cartoonify-for-Video-Call-Privacy/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Face Cartoonify For Video Call Privacy -emoji: 👁 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.0.5 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/ICML2022/OFA/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py b/spaces/ICML2022/OFA/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py deleted file mode 100644 index a92da3a298e21528b7007df3f8198bb3af94a485..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py +++ /dev/null @@ -1 +0,0 @@ -../truncated_bptt/truncated_bptt_lm_task.py \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py b/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py deleted file mode 100644 index 9e7b655feee0042d42ac2b13cec5f1d2a88e201e..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/latent_multilingual_transformer.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.models import register_model, register_model_architecture -from fairseq.models.multilingual_transformer import MultilingualTransformerModel -from fairseq.models.transformer import ( - TransformerDecoder, - TransformerEncoder, - base_architecture, -) -from fairseq.utils import safe_hasattr - -from .latent_transformer import LatentTransformerDecoder, LatentTransformerEncoder - - -@register_model("latent_multilingual_transformer") -class LatentMultilingualTransformerModel(MultilingualTransformerModel): - """A variant of standard multilingual Transformer models which encoder and/or - decoders supports latent depth, as is in "Deep Transformer with Latent Depth" - (https://arxiv.org/abs/2009.13102). 
- """ - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - MultilingualTransformerModel.add_args(parser) - parser.add_argument( - '--soft-select', - action='store_true', - help='use soft samples in training an inference', - ) - parser.add_argument( - '--sampling-tau', - type=float, - default=5., - help='sampling temperature', - ) - - @classmethod - def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs): - if is_encoder: - if safe_hasattr(args, "encoder_latent_layer") and args.encoder_latent_layer: - return LatentTransformerEncoder( - args, lang_dict, embed_tokens, num_logits=len(langs) - ) - else: - return TransformerEncoder(args, lang_dict, embed_tokens) - else: - if safe_hasattr(args, "decoder_latent_layer") and args.decoder_latent_layer: - return LatentTransformerDecoder( - args, lang_dict, embed_tokens, num_logits=len(langs) - ) - else: - return TransformerDecoder(args, lang_dict, embed_tokens) - - -@register_model_architecture( - "latent_multilingual_transformer", "latent_multilingual_transformer" -) -def latent_multilingual_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.encoder_layers = getattr(args, "encoder_layers", 12) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) - args.decoder_layers = getattr(args, "decoder_layers", 24) - args.share_encoders = getattr(args, "share_encoders", True) - args.share_decoders = getattr(args, "share_decoders", True) - args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", True) - args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", True) - - base_architecture(args) diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_to_text/docs/librispeech_example.md b/spaces/ICML2022/OFA/fairseq/examples/speech_to_text/docs/librispeech_example.md deleted file mode 100644 index 4040fda9426027537036ba987d087a43e734bfd9..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_to_text/docs/librispeech_example.md +++ /dev/null @@ -1,69 +0,0 @@ -[[Back]](..) - -# S2T Example: Speech Recognition (ASR) on LibriSpeech -[LibriSpeech](https://www.danielpovey.com/files/2015_icassp_librispeech.pdf) is a de-facto standard English ASR -benchmark. We provide competitive -vanilla [Transformer](https://papers.nips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf) baselines. - -## Data preparation -Download and preprocess LibriSpeech data with -```bash -# additional Python packages for S2T data processing/model training -pip install pandas torchaudio sentencepiece - -python examples/speech_to_text/prep_librispeech_data.py \ - --output-root ${LS_ROOT} --vocab-type unigram --vocab-size 10000 -``` -where `LS_ROOT` is the root path for downloaded data as well as generated files (manifest, features, vocabulary and -data configuration). - -[Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_vocab_unigram10000.zip) our vocabulary files -if you want to use our pre-trained models. 
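
Before launching training, it can help to sanity-check the files generated under `${LS_ROOT}`. The sketch below is a hypothetical inspection of one manifest using `pandas` (already installed above); the file name (`dev-clean.tsv`) and its columns are assumptions based on the split names used in this recipe, so adapt it to whatever the preparation script actually produced.

```python
import pandas as pd

# Hypothetical quick look at one generated manifest. The file name and its
# columns are assumptions -- list ${LS_ROOT} to see exactly what the
# preparation script wrote.
ls_root = "/path/to/LS_ROOT"  # same directory as used above (placeholder)
manifest = pd.read_csv(f"{ls_root}/dev-clean.tsv", sep="\t")
print(manifest.columns.tolist())
print(manifest.head())
print(f"{len(manifest)} utterances in dev-clean")
```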
- -## Training -```bash -fairseq-train ${LS_ROOT} --save-dir ${SAVE_DIR} \ - --config-yaml config.yaml --train-subset train-clean-100,train-clean-360,train-other-500 --valid-subset dev-clean,dev-other \ - --num-workers 4 --max-tokens 40000 --max-update 300000 \ - --task speech_to_text --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --report-accuracy \ - --arch s2t_transformer_s --share-decoder-input-output-embed \ - --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt --warmup-updates 10000 \ - --clip-norm 10.0 --seed 1 --update-freq 8 -``` -where `SAVE_DIR` is the checkpoint root path. Here we use `--arch s2t_transformer_s` (31M parameters) as example. -For better performance, you may switch to `s2t_transformer_m` (71M, with `--lr 1e-3`) or `s2t_transformer_l` -(268M, with `--lr 5e-4`). We set `--update-freq 8` to simulate 8 GPUs with 1 GPU. You may want to update it accordingly -when using more than 1 GPU. - -## Inference & Evaluation -Average the last 10 checkpoints and evaluate on the 4 splits -(`dev-clean`, `dev-other`, `test-clean` and `test-other`): -```bash -CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt -python scripts/average_checkpoints.py --inputs ${SAVE_DIR} \ - --num-epoch-checkpoints 10 \ - --output "${SAVE_DIR}/${CHECKPOINT_FILENAME}" -for SUBSET in dev-clean dev-other test-clean test-other; do - fairseq-generate ${LS_ROOT} --config-yaml config.yaml --gen-subset ${SUBSET} \ - --task speech_to_text --path ${SAVE_DIR}/${CHECKPOINT_FILENAME} \ - --max-tokens 50000 --beam 5 --scoring wer -done -``` - -## Interactive Decoding -Launch the interactive console via -```bash -fairseq-interactive ${LS_ROOT} --config-yaml config.yaml --task speech_to_text \ - --path ${SAVE_DIR}/${CHECKPOINT_FILENAME} --max-tokens 50000 --beam 5 -``` -Type in WAV/FLAC/OGG audio paths (one per line) after the prompt. - -## Results - -| --arch | Params | dev-clean | dev-other | test-clean | test-other | Model | -|---|---|---|---|---|---|---| -| s2t_transformer_s | 30M | 3.8 | 8.9 | 4.4 | 9.0 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_transformer_s.pt) | -| s2t_transformer_m | 71M | 3.2 | 8.0 | 3.4 | 7.9 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_transformer_m.pt) | -| s2t_transformer_l | 268M | 3.0 | 7.5 | 3.2 | 7.5 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/librispeech_transformer_l.pt) | - -[[Back]](..) 
diff --git a/spaces/Ifeanyi/classify-images/app.py b/spaces/Ifeanyi/classify-images/app.py deleted file mode 100644 index 59006fd2cb4cee711d58225e856c11b9b03a32ef..0000000000000000000000000000000000000000 --- a/spaces/Ifeanyi/classify-images/app.py +++ /dev/null @@ -1,6 +0,0 @@ -from transformers import pipeline -import gradio as gr -import timm - -image_classifier = pipeline("image-classification") -gr.Interface.from_pipeline(image_classifier).launch(share = True) \ No newline at end of file diff --git a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/data_utils.py b/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/data_utils.py deleted file mode 100644 index c6c8dee9d157161f2082484b89bdb282364e2a0e..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/data_utils.py +++ /dev/null @@ -1,267 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data -import torchaudio - -import commons -from mel_processing import spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import text_to_sequence, cleaned_text_to_sequence -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths_sid_text, hparams, symbols): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - self.symbols = symbols - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - for audiopath, sid, text in self.audiopaths_sid_text: - # audiopath = "./user_voice/" + audiopath - - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_sid_text_new.append([audiopath, sid, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - sid = self.get_sid(sid) - return (text, spec, wav, sid) - - def get_audio(self, filename): - # audio, sampling_rate = load_wav_to_torch(filename) - # if sampling_rate != self.sampling_rate: - # raise ValueError("{} {} SR doesn't match target {} SR".format( - # sampling_rate, self.sampling_rate)) - # audio_norm = audio / self.max_wav_value if audio.max() > 10 else audio - # audio_norm = audio_norm.unsqueeze(0) - audio_norm, sampling_rate = 
torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True) - # spec_filename = filename.replace(".wav", ".spec.pt") - # if os.path.exists(spec_filename): - # spec = torch.load(spec_filename) - # else: - # try: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = spec.squeeze(0) - # except NotImplementedError: - # print("?") - # spec = torch.squeeze(spec, 0) - # torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text, self.symbols) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
- """ - - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size \ No newline at end of file diff --git a/spaces/Illumotion/Koboldcpp/otherarch/tools/convert_hf_gpt2.py b/spaces/Illumotion/Koboldcpp/otherarch/tools/convert_hf_gpt2.py deleted file mode 100644 index b6e20983add4c80d1720ea6e795eec1b52117000..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/otherarch/tools/convert_hf_gpt2.py +++ /dev/null @@ -1,183 +0,0 @@ -# Convert Cerebras models to ggml format -# -# ref: https://www.cerebras.net/blog/cerebras-gpt-a-family-of-open-compute-efficient-large-language-models/ -# - -import sys -import struct -import json -import torch -import numpy as np -import re - -from transformers import AutoModelForCausalLM - -# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py -def bytes_to_unicode(): - 
""" - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. - This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a signficant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. - And avoids mapping to whitespace/control characters the bpe code barfs on. - """ - bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) - cs = bs[:] - n = 0 - for b in range(2**8): - if b not in bs: - bs.append(b) - cs.append(2**8+n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - -if len(sys.argv) < 2: - print("Usage: convert-h5-to-ggml.py dir-model [use-f32]\n") - sys.exit(1) - -# output in the same directory as the model -dir_model = sys.argv[1] -fname_out = sys.argv[1] + "/ggml-model-f16.bin" - -with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f: - encoder = json.load(f) - -with open(dir_model + "/config.json", "r", encoding="utf-8") as f: - hparams = json.load(f) - -# use 16-bit or 32-bit floats -use_f16 = True -if len(sys.argv) > 2: - use_f16 = False - fname_out = sys.argv[1] + "/ggml-model-f32.bin" - -model = AutoModelForCausalLM.from_pretrained(dir_model, low_cpu_mem_usage=True) -#print (model) - -list_vars = model.state_dict() -#print (list_vars) - -print(hparams) - -fout = open(fname_out, "wb") - -fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex -fout.write(struct.pack("i", hparams["vocab_size"])) -fout.write(struct.pack("i", hparams["n_positions"])) -fout.write(struct.pack("i", hparams["n_embd"])) -fout.write(struct.pack("i", hparams["n_head"])) -fout.write(struct.pack("i", hparams["n_layer"])) -fout.write(struct.pack("i", use_f16)) - -byte_encoder = bytes_to_unicode() -byte_decoder = {v:k for k, v in byte_encoder.items()} - -fout.write(struct.pack("i", len(encoder))) - -for key in encoder: - text = bytearray([byte_decoder[c] for c in key]) - fout.write(struct.pack("i", len(text))) - fout.write(text) - -for name in list_vars.keys(): - data = list_vars[name].squeeze().numpy() - print("Processing variable: " + name + " with shape: ", data.shape) - - # rename headers to keep compatibility - if name == "transformer.ln_f.weight": - name = "model/ln_f/g" - elif name == "transformer.ln_f.bias": - name = "model/ln_f/b" - elif name == "transformer.wte.weight": - name = "model/wte" - elif name == "transformer.wpe.weight": - name = "model/wpe" - elif name == "lm_head.weight": - name = "model/lm_head" - elif re.match(r"transformer.h\.\d+\.ln_1\.weight", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/ln_1/g" - elif re.match(r"transformer.h\.\d+\.ln_1\.bias", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/ln_1/b" - elif re.match(r"transformer.h\.\d+\.attn\.c_attn\.weight", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/attn/c_attn/w" - elif re.match(r"transformer.h\.\d+\.attn\.c_attn\.bias", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/attn/c_attn/b" - elif re.match(r"transformer.h\.\d+\.attn\.c_proj\.weight", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/attn/c_proj/w" - elif re.match(r"transformer.h.\d+.attn.c_proj.bias", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/attn/c_proj/b" - elif re.match(r"transformer.h.\d+.ln_2.weight", 
name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/ln_2/g" - elif re.match(r"transformer.h.\d+.ln_2.bias", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/ln_2/b" - elif re.match(r"transformer.h.\d+.mlp.c_fc.weight", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/mlp/c_fc/w" - elif re.match(r"transformer.h.\d+.mlp.c_fc.bias", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/mlp/c_fc/b" - elif re.match(r"transformer.h.\d+.mlp.c_proj.weight", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/mlp/c_proj/w" - elif re.match(r"transformer.h.\d+.mlp.c_proj.bias", name): - i = re.findall("\d+", name)[0] - name = f"model/h{i}/mlp/c_proj/b" - else: - print("Unrecognized variable name. %s", name) - - # we don't need these - if name.endswith("attn.masked_bias") or name.endswith(".attn.bias"): - print(" Skipping variable: " + name) - continue - - n_dims = len(data.shape); - - # ftype == 0 -> float32, ftype == 1 -> float16 - ftype = 0; - if use_f16: - if (name == "model/wte" or name == "model/lm_head" or name[-2:] == "/g" or name[-2:] == "/w") and n_dims == 2: - print(" Converting to float16") - data = data.astype(np.float16) - ftype = 1 - else: - print(" Converting to float32") - data = data.astype(np.float32) - ftype = 0 - - # for efficiency - transpose the projection matrices - # "model/h.*/attn/c_attn/w" - # "model/h.*/attn/c_proj/w" - # "model/h.*/mlp/c_fc/w" - # "model/h.*/mlp/c_proj/w" - if name[-14:] == "/attn/c_attn/w" or \ - name[-14:] == "/attn/c_proj/w" or \ - name[-11:] == "/mlp/c_fc/w" or \ - name[-13:] == "/mlp/c_proj/w": - print(" Transposing") - data = data.transpose() - - # header - str = name.encode('utf-8') - fout.write(struct.pack("iii", n_dims, len(str), ftype)) - for i in range(n_dims): - fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) - fout.write(str); - - # data - data.tofile(fout) - -fout.close() - -print("Done. 
Output file: " + fname_out) -print("") \ No newline at end of file diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/evaluator_example.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/evaluator_example.py deleted file mode 100644 index 669e3c53c1218444a880dc78f19a565a406ff6dc..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/evaluator_example.py +++ /dev/null @@ -1,76 +0,0 @@ -import os - -import cv2 -import numpy as np -import torch -from skimage import io -from skimage.transform import resize -from torch.utils.data import Dataset - -from saicinpainting.evaluation.evaluator import InpaintingEvaluator -from saicinpainting.evaluation.losses.base_loss import SSIMScore, LPIPSScore, FIDScore - - -class SimpleImageDataset(Dataset): - def __init__(self, root_dir, image_size=(400, 600)): - self.root_dir = root_dir - self.files = sorted(os.listdir(root_dir)) - self.image_size = image_size - - def __getitem__(self, index): - img_name = os.path.join(self.root_dir, self.files[index]) - image = io.imread(img_name) - image = resize(image, self.image_size, anti_aliasing=True) - image = torch.FloatTensor(image).permute(2, 0, 1) - return image - - def __len__(self): - return len(self.files) - - -def create_rectangle_mask(height, width): - mask = np.ones((height, width)) - up_left_corner = width // 4, height // 4 - down_right_corner = (width - up_left_corner[0] - 1, height - up_left_corner[1] - 1) - cv2.rectangle(mask, up_left_corner, down_right_corner, (0, 0, 0), thickness=cv2.FILLED) - return mask - - -class Model(): - def __call__(self, img_batch, mask_batch): - mean = (img_batch * mask_batch[:, None, :, :]).sum(dim=(2, 3)) / mask_batch.sum(dim=(1, 2))[:, None] - inpainted = mean[:, :, None, None] * (1 - mask_batch[:, None, :, :]) + img_batch * mask_batch[:, None, :, :] - return inpainted - - -class SimpleImageSquareMaskDataset(Dataset): - def __init__(self, dataset): - self.dataset = dataset - self.mask = torch.FloatTensor(create_rectangle_mask(*self.dataset.image_size)) - self.model = Model() - - def __getitem__(self, index): - img = self.dataset[index] - mask = self.mask.clone() - inpainted = self.model(img[None, ...], mask[None, ...]) - return dict(image=img, mask=mask, inpainted=inpainted) - - def __len__(self): - return len(self.dataset) - - -dataset = SimpleImageDataset('imgs') -mask_dataset = SimpleImageSquareMaskDataset(dataset) -model = Model() -metrics = { - 'ssim': SSIMScore(), - 'lpips': LPIPSScore(), - 'fid': FIDScore() -} - -evaluator = InpaintingEvaluator( - mask_dataset, scores=metrics, batch_size=3, area_grouping=True -) - -results = evaluator.evaluate(model) -print(results) diff --git a/spaces/IoMa/diffusers-gallery/Dockerfile b/spaces/IoMa/diffusers-gallery/Dockerfile deleted file mode 100644 index 0ba18d346de09532882673442ee72107556a887d..0000000000000000000000000000000000000000 --- a/spaces/IoMa/diffusers-gallery/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM nginxinc/nginx-unprivileged:alpine -COPY . 
/usr/share/nginx/html \ No newline at end of file diff --git a/spaces/Iqbalzz/hololive-rvc-models/infer_pack/commons.py b/spaces/Iqbalzz/hololive-rvc-models/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/Iqbalzz/hololive-rvc-models/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def 
fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/alt_diffusion/__init__.py b/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/alt_diffusion/__init__.py deleted file mode 100644 index 09d0d9b7852c4babfe26c33874bcb1bf52271b39..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/alt_diffusion/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional, Union - -import numpy as np - -import PIL -from PIL import Image - -from ...utils import BaseOutput, is_torch_available, is_transformers_available - - -@dataclass -# Copied from diffusers.pipelines.stable_diffusion.__init__.StableDiffusionPipelineOutput with Stable->Alt -class AltDiffusionPipelineOutput(BaseOutput): - """ - Output class for Alt Diffusion pipelines. - - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, - num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. - nsfw_content_detected (`List[bool]`) - List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, or `None` if safety checking could not be performed. 
- """ - - images: Union[List[PIL.Image.Image], np.ndarray] - nsfw_content_detected: Optional[List[bool]] - - -if is_transformers_available() and is_torch_available(): - from .modeling_roberta_series import RobertaSeriesModelWithTransformation - from .pipeline_alt_diffusion import AltDiffusionPipeline - from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline diff --git a/spaces/Jacks2003/3D_Photo_Inpainting/networks.py b/spaces/Jacks2003/3D_Photo_Inpainting/networks.py deleted file mode 100644 index 4ec0c423a480543893459a78025c129ef4857e87..0000000000000000000000000000000000000000 --- a/spaces/Jacks2003/3D_Photo_Inpainting/networks.py +++ /dev/null @@ -1,501 +0,0 @@ -import torch -import torch.nn as nn -import numpy as np -import matplotlib.pyplot as plt -import torch.nn.functional as F - - -class BaseNetwork(nn.Module): - def __init__(self): - super(BaseNetwork, self).__init__() - - def init_weights(self, init_type='normal', gain=0.02): - ''' - initialize network's weights - init_type: normal | xavier | kaiming | orthogonal - https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39 - ''' - - def init_func(m): - classname = m.__class__.__name__ - if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): - if init_type == 'normal': - nn.init.normal_(m.weight.data, 0.0, gain) - elif init_type == 'xavier': - nn.init.xavier_normal_(m.weight.data, gain=gain) - elif init_type == 'kaiming': - nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - elif init_type == 'orthogonal': - nn.init.orthogonal_(m.weight.data, gain=gain) - - if hasattr(m, 'bias') and m.bias is not None: - nn.init.constant_(m.bias.data, 0.0) - - elif classname.find('BatchNorm2d') != -1: - nn.init.normal_(m.weight.data, 1.0, gain) - nn.init.constant_(m.bias.data, 0.0) - - self.apply(init_func) - -def weights_init(init_type='gaussian'): - def init_fun(m): - classname = m.__class__.__name__ - if (classname.find('Conv') == 0 or classname.find( - 'Linear') == 0) and hasattr(m, 'weight'): - if init_type == 'gaussian': - nn.init.normal_(m.weight, 0.0, 0.02) - elif init_type == 'xavier': - nn.init.xavier_normal_(m.weight, gain=math.sqrt(2)) - elif init_type == 'kaiming': - nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') - elif init_type == 'orthogonal': - nn.init.orthogonal_(m.weight, gain=math.sqrt(2)) - elif init_type == 'default': - pass - else: - assert 0, "Unsupported initialization: {}".format(init_type) - if hasattr(m, 'bias') and m.bias is not None: - nn.init.constant_(m.bias, 0.0) - - return init_fun - -class PartialConv(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, dilation=1, groups=1, bias=True): - super().__init__() - self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation, groups, bias) - self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation, groups, False) - self.input_conv.apply(weights_init('kaiming')) - self.slide_winsize = in_channels * kernel_size * kernel_size - - torch.nn.init.constant_(self.mask_conv.weight, 1.0) - - # mask is not updated - for param in self.mask_conv.parameters(): - param.requires_grad = False - - def forward(self, input, mask): - # http://masc.cs.gmu.edu/wiki/partialconv - # C(X) = W^T * X + b, C(0) = b, D(M) = 1 * M + 0 = sum(M) - # W^T* (M .* X) / sum(M) + b = [C(M .* X) – C(0)] / D(M) + C(0) - output = self.input_conv(input * mask) - 
if self.input_conv.bias is not None: - output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as( - output) - else: - output_bias = torch.zeros_like(output) - - with torch.no_grad(): - output_mask = self.mask_conv(mask) - - no_update_holes = output_mask == 0 - - mask_sum = output_mask.masked_fill_(no_update_holes, 1.0) - - output_pre = ((output - output_bias) * self.slide_winsize) / mask_sum + output_bias - output = output_pre.masked_fill_(no_update_holes, 0.0) - - new_mask = torch.ones_like(output) - new_mask = new_mask.masked_fill_(no_update_holes, 0.0) - - return output, new_mask - - -class PCBActiv(nn.Module): - def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ='relu', - conv_bias=False): - super().__init__() - if sample == 'down-5': - self.conv = PartialConv(in_ch, out_ch, 5, 2, 2, bias=conv_bias) - elif sample == 'down-7': - self.conv = PartialConv(in_ch, out_ch, 7, 2, 3, bias=conv_bias) - elif sample == 'down-3': - self.conv = PartialConv(in_ch, out_ch, 3, 2, 1, bias=conv_bias) - else: - self.conv = PartialConv(in_ch, out_ch, 3, 1, 1, bias=conv_bias) - - if bn: - self.bn = nn.BatchNorm2d(out_ch) - if activ == 'relu': - self.activation = nn.ReLU() - elif activ == 'leaky': - self.activation = nn.LeakyReLU(negative_slope=0.2) - - def forward(self, input, input_mask): - h, h_mask = self.conv(input, input_mask) - if hasattr(self, 'bn'): - h = self.bn(h) - if hasattr(self, 'activation'): - h = self.activation(h) - return h, h_mask - -class Inpaint_Depth_Net(nn.Module): - def __init__(self, layer_size=7, upsampling_mode='nearest'): - super().__init__() - in_channels = 4 - out_channels = 1 - self.freeze_enc_bn = False - self.upsampling_mode = upsampling_mode - self.layer_size = layer_size - self.enc_1 = PCBActiv(in_channels, 64, bn=False, sample='down-7', conv_bias=True) - self.enc_2 = PCBActiv(64, 128, sample='down-5', conv_bias=True) - self.enc_3 = PCBActiv(128, 256, sample='down-5') - self.enc_4 = PCBActiv(256, 512, sample='down-3') - for i in range(4, self.layer_size): - name = 'enc_{:d}'.format(i + 1) - setattr(self, name, PCBActiv(512, 512, sample='down-3')) - - for i in range(4, self.layer_size): - name = 'dec_{:d}'.format(i + 1) - setattr(self, name, PCBActiv(512 + 512, 512, activ='leaky')) - self.dec_4 = PCBActiv(512 + 256, 256, activ='leaky') - self.dec_3 = PCBActiv(256 + 128, 128, activ='leaky') - self.dec_2 = PCBActiv(128 + 64, 64, activ='leaky') - self.dec_1 = PCBActiv(64 + in_channels, out_channels, - bn=False, activ=None, conv_bias=True) - def add_border(self, input, mask_flag, PCONV=True): - with torch.no_grad(): - h = input.shape[-2] - w = input.shape[-1] - require_len_unit = 2 ** self.layer_size - residual_h = int(np.ceil(h / float(require_len_unit)) * require_len_unit - h) # + 2*require_len_unit - residual_w = int(np.ceil(w / float(require_len_unit)) * require_len_unit - w) # + 2*require_len_unit - enlarge_input = torch.zeros((input.shape[0], input.shape[1], h + residual_h, w + residual_w)).to(input.device) - if mask_flag: - if PCONV is False: - enlarge_input += 1.0 - enlarge_input = enlarge_input.clamp(0.0, 1.0) - else: - enlarge_input[:, 2, ...] 
= 0.0 - anchor_h = residual_h//2 - anchor_w = residual_w//2 - enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input - - return enlarge_input, [anchor_h, anchor_h+h, anchor_w, anchor_w+w] - - def forward_3P(self, mask, context, depth, edge, unit_length=128, cuda=None): - with torch.no_grad(): - input = torch.cat((depth, edge, context, mask), dim=1) - n, c, h, w = input.shape - residual_h = int(np.ceil(h / float(unit_length)) * unit_length - h) - residual_w = int(np.ceil(w / float(unit_length)) * unit_length - w) - anchor_h = residual_h//2 - anchor_w = residual_w//2 - enlarge_input = torch.zeros((n, c, h + residual_h, w + residual_w)).to(cuda) - enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input - # enlarge_input[:, 3] = 1. - enlarge_input[:, 3] - depth_output = self.forward(enlarge_input) - depth_output = depth_output[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] - # import pdb; pdb.set_trace() - - return depth_output - - def forward(self, input_feat, refine_border=False, sample=False, PCONV=True): - input = input_feat - input_mask = (input_feat[:, -2:-1] + input_feat[:, -1:]).clamp(0, 1).repeat(1, input.shape[1], 1, 1) - - vis_input = input.cpu().data.numpy() - vis_input_mask = input_mask.cpu().data.numpy() - H, W = input.shape[-2:] - if refine_border is True: - input, anchor = self.add_border(input, mask_flag=False) - input_mask, anchor = self.add_border(input_mask, mask_flag=True, PCONV=PCONV) - h_dict = {} # for the output of enc_N - h_mask_dict = {} # for the output of enc_N - h_dict['h_0'], h_mask_dict['h_0'] = input, input_mask - - h_key_prev = 'h_0' - for i in range(1, self.layer_size + 1): - l_key = 'enc_{:d}'.format(i) - h_key = 'h_{:d}'.format(i) - h_dict[h_key], h_mask_dict[h_key] = getattr(self, l_key)( - h_dict[h_key_prev], h_mask_dict[h_key_prev]) - h_key_prev = h_key - - h_key = 'h_{:d}'.format(self.layer_size) - h, h_mask = h_dict[h_key], h_mask_dict[h_key] - - for i in range(self.layer_size, 0, -1): - enc_h_key = 'h_{:d}'.format(i - 1) - dec_l_key = 'dec_{:d}'.format(i) - - h = F.interpolate(h, scale_factor=2, mode=self.upsampling_mode) - h_mask = F.interpolate(h_mask, scale_factor=2, mode='nearest') - - h = torch.cat([h, h_dict[enc_h_key]], dim=1) - h_mask = torch.cat([h_mask, h_mask_dict[enc_h_key]], dim=1) - h, h_mask = getattr(self, dec_l_key)(h, h_mask) - output = h - if refine_border is True: - h_mask = h_mask[..., anchor[0]:anchor[1], anchor[2]:anchor[3]] - output = output[..., anchor[0]:anchor[1], anchor[2]:anchor[3]] - - return output - -class Inpaint_Edge_Net(BaseNetwork): - def __init__(self, residual_blocks=8, init_weights=True): - super(Inpaint_Edge_Net, self).__init__() - in_channels = 7 - out_channels = 1 - self.encoder = [] - # 0 - self.encoder_0 = nn.Sequential( - nn.ReflectionPad2d(3), - spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=7, padding=0), True), - nn.InstanceNorm2d(64, track_running_stats=False), - nn.ReLU(True)) - # 1 - self.encoder_1 = nn.Sequential( - spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1), True), - nn.InstanceNorm2d(128, track_running_stats=False), - nn.ReLU(True)) - # 2 - self.encoder_2 = nn.Sequential( - spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1), True), - nn.InstanceNorm2d(256, track_running_stats=False), - nn.ReLU(True)) - # 3 - blocks = [] - for _ in range(residual_blocks): - block = ResnetBlock(256, 2) - blocks.append(block) - - self.middle = nn.Sequential(*blocks) - 
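-        # decoder: upsamples with transposed convolutions and concatenates the
-        # matching encoder features (U-Net style skips) before a final 7x7 conv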
# + 3 - self.decoder_0 = nn.Sequential( - spectral_norm(nn.ConvTranspose2d(in_channels=256+256, out_channels=128, kernel_size=4, stride=2, padding=1), True), - nn.InstanceNorm2d(128, track_running_stats=False), - nn.ReLU(True)) - # + 2 - self.decoder_1 = nn.Sequential( - spectral_norm(nn.ConvTranspose2d(in_channels=128+128, out_channels=64, kernel_size=4, stride=2, padding=1), True), - nn.InstanceNorm2d(64, track_running_stats=False), - nn.ReLU(True)) - # + 1 - self.decoder_2 = nn.Sequential( - nn.ReflectionPad2d(3), - nn.Conv2d(in_channels=64+64, out_channels=out_channels, kernel_size=7, padding=0), - ) - - if init_weights: - self.init_weights() - - def add_border(self, input, channel_pad_1=None): - h = input.shape[-2] - w = input.shape[-1] - require_len_unit = 16 - residual_h = int(np.ceil(h / float(require_len_unit)) * require_len_unit - h) # + 2*require_len_unit - residual_w = int(np.ceil(w / float(require_len_unit)) * require_len_unit - w) # + 2*require_len_unit - enlarge_input = torch.zeros((input.shape[0], input.shape[1], h + residual_h, w + residual_w)).to(input.device) - if channel_pad_1 is not None: - for channel in channel_pad_1: - enlarge_input[:, channel] = 1 - anchor_h = residual_h//2 - anchor_w = residual_w//2 - enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input - - return enlarge_input, [anchor_h, anchor_h+h, anchor_w, anchor_w+w] - - def forward_3P(self, mask, context, rgb, disp, edge, unit_length=128, cuda=None): - with torch.no_grad(): - input = torch.cat((rgb, disp/disp.max(), edge, context, mask), dim=1) - n, c, h, w = input.shape - residual_h = int(np.ceil(h / float(unit_length)) * unit_length - h) - residual_w = int(np.ceil(w / float(unit_length)) * unit_length - w) - anchor_h = residual_h//2 - anchor_w = residual_w//2 - enlarge_input = torch.zeros((n, c, h + residual_h, w + residual_w)).to(cuda) - enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input - edge_output = self.forward(enlarge_input) - edge_output = edge_output[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] - - return edge_output - - def forward(self, x, refine_border=False): - if refine_border: - x, anchor = self.add_border(x, [5]) - x1 = self.encoder_0(x) - x2 = self.encoder_1(x1) - x3 = self.encoder_2(x2) - x4 = self.middle(x3) - x5 = self.decoder_0(torch.cat((x4, x3), dim=1)) - x6 = self.decoder_1(torch.cat((x5, x2), dim=1)) - x7 = self.decoder_2(torch.cat((x6, x1), dim=1)) - x = torch.sigmoid(x7) - if refine_border: - x = x[..., anchor[0]:anchor[1], anchor[2]:anchor[3]] - - return x - -class Inpaint_Color_Net(nn.Module): - def __init__(self, layer_size=7, upsampling_mode='nearest', add_hole_mask=False, add_two_layer=False, add_border=False): - super().__init__() - self.freeze_enc_bn = False - self.upsampling_mode = upsampling_mode - self.layer_size = layer_size - in_channels = 6 - self.enc_1 = PCBActiv(in_channels, 64, bn=False, sample='down-7') - self.enc_2 = PCBActiv(64, 128, sample='down-5') - self.enc_3 = PCBActiv(128, 256, sample='down-5') - self.enc_4 = PCBActiv(256, 512, sample='down-3') - self.enc_5 = PCBActiv(512, 512, sample='down-3') - self.enc_6 = PCBActiv(512, 512, sample='down-3') - self.enc_7 = PCBActiv(512, 512, sample='down-3') - - self.dec_7 = PCBActiv(512+512, 512, activ='leaky') - self.dec_6 = PCBActiv(512+512, 512, activ='leaky') - - self.dec_5A = PCBActiv(512 + 512, 512, activ='leaky') - self.dec_4A = PCBActiv(512 + 256, 256, activ='leaky') - self.dec_3A = PCBActiv(256 + 128, 128, activ='leaky') - self.dec_2A = PCBActiv(128 + 64, 64, 
activ='leaky') - self.dec_1A = PCBActiv(64 + in_channels, 3, bn=False, activ=None, conv_bias=True) - ''' - self.dec_5B = PCBActiv(512 + 512, 512, activ='leaky') - self.dec_4B = PCBActiv(512 + 256, 256, activ='leaky') - self.dec_3B = PCBActiv(256 + 128, 128, activ='leaky') - self.dec_2B = PCBActiv(128 + 64, 64, activ='leaky') - self.dec_1B = PCBActiv(64 + 4, 1, bn=False, activ=None, conv_bias=True) - ''' - def cat(self, A, B): - return torch.cat((A, B), dim=1) - - def upsample(self, feat, mask): - feat = F.interpolate(feat, scale_factor=2, mode=self.upsampling_mode) - mask = F.interpolate(mask, scale_factor=2, mode='nearest') - - return feat, mask - - def forward_3P(self, mask, context, rgb, edge, unit_length=128, cuda=None): - with torch.no_grad(): - input = torch.cat((rgb, edge, context, mask), dim=1) - n, c, h, w = input.shape - residual_h = int(np.ceil(h / float(unit_length)) * unit_length - h) # + 128 - residual_w = int(np.ceil(w / float(unit_length)) * unit_length - w) # + 256 - anchor_h = residual_h//2 - anchor_w = residual_w//2 - enlarge_input = torch.zeros((n, c, h + residual_h, w + residual_w)).to(cuda) - enlarge_input[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] = input - # enlarge_input[:, 3] = 1. - enlarge_input[:, 3] - enlarge_input = enlarge_input.to(cuda) - rgb_output = self.forward(enlarge_input) - rgb_output = rgb_output[..., anchor_h:anchor_h+h, anchor_w:anchor_w+w] - - return rgb_output - - def forward(self, input, add_border=False): - input_mask = (input[:, -2:-1] + input[:, -1:]).clamp(0, 1) - H, W = input.shape[-2:] - f_0, h_0 = input, input_mask.repeat((1,input.shape[1],1,1)) - f_1, h_1 = self.enc_1(f_0, h_0) - f_2, h_2 = self.enc_2(f_1, h_1) - f_3, h_3 = self.enc_3(f_2, h_2) - f_4, h_4 = self.enc_4(f_3, h_3) - f_5, h_5 = self.enc_5(f_4, h_4) - f_6, h_6 = self.enc_6(f_5, h_5) - f_7, h_7 = self.enc_7(f_6, h_6) - - o_7, k_7 = self.upsample(f_7, h_7) - o_6, k_6 = self.dec_7(self.cat(o_7, f_6), self.cat(k_7, h_6)) - o_6, k_6 = self.upsample(o_6, k_6) - o_5, k_5 = self.dec_6(self.cat(o_6, f_5), self.cat(k_6, h_5)) - o_5, k_5 = self.upsample(o_5, k_5) - o_5A, k_5A = o_5, k_5 - o_5B, k_5B = o_5, k_5 - ############### - o_4A, k_4A = self.dec_5A(self.cat(o_5A, f_4), self.cat(k_5A, h_4)) - o_4A, k_4A = self.upsample(o_4A, k_4A) - o_3A, k_3A = self.dec_4A(self.cat(o_4A, f_3), self.cat(k_4A, h_3)) - o_3A, k_3A = self.upsample(o_3A, k_3A) - o_2A, k_2A = self.dec_3A(self.cat(o_3A, f_2), self.cat(k_3A, h_2)) - o_2A, k_2A = self.upsample(o_2A, k_2A) - o_1A, k_1A = self.dec_2A(self.cat(o_2A, f_1), self.cat(k_2A, h_1)) - o_1A, k_1A = self.upsample(o_1A, k_1A) - o_0A, k_0A = self.dec_1A(self.cat(o_1A, f_0), self.cat(k_1A, h_0)) - - return torch.sigmoid(o_0A) - - def train(self, mode=True): - """ - Override the default train() to freeze the BN parameters - """ - super().train(mode) - if self.freeze_enc_bn: - for name, module in self.named_modules(): - if isinstance(module, nn.BatchNorm2d) and 'enc' in name: - module.eval() - -class Discriminator(BaseNetwork): - def __init__(self, use_sigmoid=True, use_spectral_norm=True, init_weights=True, in_channels=None): - super(Discriminator, self).__init__() - self.use_sigmoid = use_sigmoid - self.conv1 = self.features = nn.Sequential( - spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm), - nn.LeakyReLU(0.2, inplace=True), - ) - - self.conv2 = nn.Sequential( - spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, 
padding=1, bias=not use_spectral_norm), use_spectral_norm), - nn.LeakyReLU(0.2, inplace=True), - ) - - self.conv3 = nn.Sequential( - spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1, bias=not use_spectral_norm), use_spectral_norm), - nn.LeakyReLU(0.2, inplace=True), - ) - - self.conv4 = nn.Sequential( - spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm), - nn.LeakyReLU(0.2, inplace=True), - ) - - self.conv5 = nn.Sequential( - spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1, bias=not use_spectral_norm), use_spectral_norm), - ) - - if init_weights: - self.init_weights() - - def forward(self, x): - conv1 = self.conv1(x) - conv2 = self.conv2(conv1) - conv3 = self.conv3(conv2) - conv4 = self.conv4(conv3) - conv5 = self.conv5(conv4) - - outputs = conv5 - if self.use_sigmoid: - outputs = torch.sigmoid(conv5) - - return outputs, [conv1, conv2, conv3, conv4, conv5] - -class ResnetBlock(nn.Module): - def __init__(self, dim, dilation=1): - super(ResnetBlock, self).__init__() - self.conv_block = nn.Sequential( - nn.ReflectionPad2d(dilation), - spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=dilation, bias=not True), True), - nn.InstanceNorm2d(dim, track_running_stats=False), - nn.LeakyReLU(negative_slope=0.2), - - nn.ReflectionPad2d(1), - spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=1, bias=not True), True), - nn.InstanceNorm2d(dim, track_running_stats=False), - ) - - def forward(self, x): - out = x + self.conv_block(x) - - # Remove ReLU at the end of the residual block - # http://torch.ch/blog/2016/02/04/resnets.html - - return out - - -def spectral_norm(module, mode=True): - if mode: - return nn.utils.spectral_norm(module) - - return module diff --git a/spaces/Jamkonams/AutoGPT/autogpt/logs.py b/spaces/Jamkonams/AutoGPT/autogpt/logs.py deleted file mode 100644 index 35037404a98f7be9b7d577b625cc190ca27f4566..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/logs.py +++ /dev/null @@ -1,332 +0,0 @@ -"""Logging module for Auto-GPT.""" -import json -import logging -import os -import random -import re -import time -import traceback -from logging import LogRecord - -from colorama import Fore, Style - -from autogpt.config import Config, Singleton -from autogpt.speech import say_text - -CFG = Config() - - -class Logger(metaclass=Singleton): - """ - Logger that handle titles in different colors. 
- Outputs logs in console, activity.log, and errors.log - For console handler: simulates typing - """ - - def __init__(self): - # create log directory if it doesn't exist - this_files_dir_path = os.path.dirname(__file__) - log_dir = os.path.join(this_files_dir_path, "../logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - - log_file = "activity.log" - error_file = "error.log" - - console_formatter = AutoGptFormatter("%(title_color)s %(message)s") - - # Create a handler for console which simulate typing - self.typing_console_handler = TypingConsoleHandler() - self.typing_console_handler.setLevel(logging.INFO) - self.typing_console_handler.setFormatter(console_formatter) - - # Create a handler for console without typing simulation - self.console_handler = ConsoleHandler() - self.console_handler.setLevel(logging.DEBUG) - self.console_handler.setFormatter(console_formatter) - - # Info handler in activity.log - self.file_handler = logging.FileHandler( - os.path.join(log_dir, log_file), "a", "utf-8" - ) - self.file_handler.setLevel(logging.DEBUG) - info_formatter = AutoGptFormatter( - "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" - ) - self.file_handler.setFormatter(info_formatter) - - # Error handler error.log - error_handler = logging.FileHandler( - os.path.join(log_dir, error_file), "a", "utf-8" - ) - error_handler.setLevel(logging.ERROR) - error_formatter = AutoGptFormatter( - "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" - " %(message_no_color)s" - ) - error_handler.setFormatter(error_formatter) - - self.typing_logger = logging.getLogger("TYPER") - self.typing_logger.addHandler(self.typing_console_handler) - self.typing_logger.addHandler(self.file_handler) - self.typing_logger.addHandler(error_handler) - self.typing_logger.setLevel(logging.DEBUG) - - self.logger = logging.getLogger("LOGGER") - self.logger.addHandler(self.console_handler) - self.logger.addHandler(self.file_handler) - self.logger.addHandler(error_handler) - self.logger.setLevel(logging.DEBUG) - - def typewriter_log( - self, title="", title_color="", content="", speak_text=False, level=logging.INFO - ): - if speak_text and CFG.speak_mode: - say_text(f"{title}. {content}") - - if content: - if isinstance(content, list): - content = " ".join(content) - else: - content = "" - - self.typing_logger.log( - level, content, extra={"title": title, "color": title_color} - ) - - def debug( - self, - message, - title="", - title_color="", - ): - self._log(title, title_color, message, logging.DEBUG) - - def warn( - self, - message, - title="", - title_color="", - ): - self._log(title, title_color, message, logging.WARN) - - def error(self, title, message=""): - self._log(title, Fore.RED, message, logging.ERROR) - - def _log(self, title="", title_color="", message="", level=logging.INFO): - if message: - if isinstance(message, list): - message = " ".join(message) - self.logger.log(level, message, extra={"title": title, "color": title_color}) - - def set_level(self, level): - self.logger.setLevel(level) - self.typing_logger.setLevel(level) - - def double_check(self, additionalText=None): - if not additionalText: - additionalText = ( - "Please ensure you've setup and configured everything" - " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to " - "double check. You can also create a github issue or join the discord" - " and ask there!" 
- ) - - self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText) - - -""" -Output stream to console using simulated typing -""" - - -class TypingConsoleHandler(logging.StreamHandler): - def emit(self, record): - min_typing_speed = 0.05 - max_typing_speed = 0.01 - - msg = self.format(record) - try: - words = msg.split() - for i, word in enumerate(words): - print(word, end="", flush=True) - if i < len(words) - 1: - print(" ", end="", flush=True) - typing_speed = random.uniform(min_typing_speed, max_typing_speed) - time.sleep(typing_speed) - # type faster after each word - min_typing_speed = min_typing_speed * 0.95 - max_typing_speed = max_typing_speed * 0.95 - print() - except Exception: - self.handleError(record) - - -class ConsoleHandler(logging.StreamHandler): - def emit(self, record) -> None: - msg = self.format(record) - try: - print(msg) - except Exception: - self.handleError(record) - - -class AutoGptFormatter(logging.Formatter): - """ - Allows to handle custom placeholders 'title_color' and 'message_no_color'. - To use this formatter, make sure to pass 'color', 'title' as log extras. - """ - - def format(self, record: LogRecord) -> str: - if hasattr(record, "color"): - record.title_color = ( - getattr(record, "color") - + getattr(record, "title") - + " " - + Style.RESET_ALL - ) - else: - record.title_color = getattr(record, "title") - if hasattr(record, "msg"): - record.message_no_color = remove_color_codes(getattr(record, "msg")) - else: - record.message_no_color = "" - return super().format(record) - - -def remove_color_codes(s: str) -> str: - ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - return ansi_escape.sub("", s) - - -logger = Logger() - - -def print_assistant_thoughts(ai_name, assistant_reply): - """Prints the assistant's thoughts to the console""" - from autogpt.json_utils.json_fix_llm import ( - attempt_to_fix_json_by_finding_outermost_brackets, - fix_and_parse_json, - ) - - try: - try: - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - if isinstance(assistant_reply_json, str): - assistant_reply_json = fix_and_parse_json(assistant_reply_json) - - # Check if assistant_reply_json is a string and attempt to parse - # it into a JSON object - if isinstance(assistant_reply_json, str): - try: - assistant_reply_json = json.loads(assistant_reply_json) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - assistant_reply_json = ( - attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply_json - ) - ) - - assistant_thoughts_reasoning = None - assistant_thoughts_plan = None - assistant_thoughts_speak = None - assistant_thoughts_criticism = None - if not isinstance(assistant_reply_json, dict): - assistant_reply_json = {} - assistant_thoughts = assistant_reply_json.get("thoughts", {}) - assistant_thoughts_text = assistant_thoughts.get("text") - - if assistant_thoughts: - assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") - assistant_thoughts_plan = assistant_thoughts.get("plan") - assistant_thoughts_criticism = assistant_thoughts.get("criticism") - assistant_thoughts_speak = assistant_thoughts.get("speak") - - logger.typewriter_log( - f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" - ) - 
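-        # the remaining sections are logged below; the plan is normalized to a
-        # string and printed as individual "- " bullet lines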
logger.typewriter_log( - "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}" - ) - - if assistant_thoughts_plan: - logger.typewriter_log("PLAN:", Fore.YELLOW, "") - # If it's a list, join it into a string - if isinstance(assistant_thoughts_plan, list): - assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) - elif isinstance(assistant_thoughts_plan, dict): - assistant_thoughts_plan = str(assistant_thoughts_plan) - - # Split the input_string using the newline character and dashes - lines = assistant_thoughts_plan.split("\n") - for line in lines: - line = line.lstrip("- ") - logger.typewriter_log("- ", Fore.GREEN, line.strip()) - - logger.typewriter_log( - "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}" - ) - # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: - say_text(assistant_thoughts_speak) - else: - logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}") - - return assistant_reply_json - except json.decoder.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - if CFG.speak_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API." - " I cannot ignore this response." - ) - - # All other errors, return "Error: + error message" - except Exception: - call_stack = traceback.format_exc() - logger.error("Error: \n", call_stack) - - -def print_assistant_thoughts( - ai_name: object, assistant_reply_json_valid: object -) -> None: - assistant_thoughts_reasoning = None - assistant_thoughts_plan = None - assistant_thoughts_speak = None - assistant_thoughts_criticism = None - - assistant_thoughts = assistant_reply_json_valid.get("thoughts", {}) - assistant_thoughts_text = assistant_thoughts.get("text") - if assistant_thoughts: - assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") - assistant_thoughts_plan = assistant_thoughts.get("plan") - assistant_thoughts_criticism = assistant_thoughts.get("criticism") - assistant_thoughts_speak = assistant_thoughts.get("speak") - logger.typewriter_log( - f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" - ) - logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}") - if assistant_thoughts_plan: - logger.typewriter_log("PLAN:", Fore.YELLOW, "") - # If it's a list, join it into a string - if isinstance(assistant_thoughts_plan, list): - assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) - elif isinstance(assistant_thoughts_plan, dict): - assistant_thoughts_plan = str(assistant_thoughts_plan) - - # Split the input_string using the newline character and dashes - lines = assistant_thoughts_plan.split("\n") - for line in lines: - line = line.lstrip("- ") - logger.typewriter_log("- ", Fore.GREEN, line.strip()) - logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}") - # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: - say_text(assistant_thoughts_speak) diff --git a/spaces/JavierIA/gccopen/utils/torch_utils.py b/spaces/JavierIA/gccopen/utils/torch_utils.py deleted file mode 100644 index 1e631b555508457a4944c11a479176463719c0e8..0000000000000000000000000000000000000000 --- a/spaces/JavierIA/gccopen/utils/torch_utils.py +++ /dev/null @@ -1,374 +0,0 @@ -# YOLOR PyTorch utils - -import datetime -import logging -import math -import os -import platform -import subprocess -import time -from contextlib import contextmanager -from copy import deepcopy -from pathlib import Path - -import torch -import 
torch.backends.cudnn as cudnn -import torch.nn as nn -import torch.nn.functional as F -import torchvision - -try: - import thop # for FLOPS computation -except ImportError: - thop = None -logger = logging.getLogger(__name__) - - -@contextmanager -def torch_distributed_zero_first(local_rank: int): - """ - Decorator to make all processes in distributed training wait for each local_master to do something. - """ - if local_rank not in [-1, 0]: - torch.distributed.barrier() - yield - if local_rank == 0: - torch.distributed.barrier() - - -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.benchmark, cudnn.deterministic = False, True - else: # faster, less reproducible - cudnn.benchmark, cudnn.deterministic = True, False - - -def date_modified(path=__file__): - # return human-readable file modification date, i.e. '2021-3-26' - t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def git_describe(path=Path(__file__).parent): # path must be a directory - # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' - try: - return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError as e: - return '' # not a git repository - - -def select_device(device='', batch_size=None): - # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string - cpu = device.lower() == 'cpu' - if cpu: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False - elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability - - cuda = not cpu and torch.cuda.is_available() - if cuda: - n = torch.cuda.device_count() - if n > 1 and batch_size: # check that batch_size is compatible with device_count - assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * len(s) - for i, d in enumerate(device.split(',') if device else range(n)): - p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB - else: - s += 'CPU\n' - - logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe - return torch.device('cuda:0' if cuda else 'cpu') - - -def time_synchronized(): - # pytorch-accurate time - if torch.cuda.is_available(): - torch.cuda.synchronize() - return time.time() - - -def profile(x, ops, n=100, device=None): - # profile a pytorch module or list of modules. 
Example usage: - # x = torch.randn(16, 3, 640, 640) # input - # m1 = lambda x: x * torch.sigmoid(x) - # m2 = nn.SiLU() - # profile(x, [m1, m2], n=100) # profile speed over 100 iterations - - device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') - x = x.to(device) - x.requires_grad = True - print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type - dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS - except: - flops = 0 - - for _ in range(n): - t[0] = time_synchronized() - y = m(x) - t[1] = time_synchronized() - try: - _ = y.sum().backward() - t[2] = time_synchronized() - except: # no backward method - t[2] = float('nan') - dtf += (t[1] - t[0]) * 1000 / n # ms per op forward - dtb += (t[2] - t[1]) * 1000 / n # ms per op backward - - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' - p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') - - -def is_parallel(model): - return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) - - -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - -def initialize_weights(model): - for m in model.modules(): - t = type(m) - if t is nn.Conv2d: - pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif t is nn.BatchNorm2d: - m.eps = 1e-3 - m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: - m.inplace = True - - -def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' - return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] - - -def sparsity(model): - # Return global model sparsity - a, b = 0., 0. - for p in model.parameters(): - a += p.numel() - b += (p == 0).sum() - return b / a - - -def prune(model, amount=0.3): - # Prune model to requested global sparsity - import torch.nn.utils.prune as prune - print('Pruning model... 
', end='') - for name, m in model.named_modules(): - if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - print(' %.3g global sparsity' % sparsity(model)) - - -def fuse_conv_and_bn(conv, bn): - # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) - - # prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) - - # prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def model_info(model, verbose=False, img_size=640): - # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] - n_p = sum(x.numel() for x in model.parameters()) # number parameters - n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients - if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) - for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - - try: # FLOPS - from thop import profile - stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 - img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS - img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS - except (ImportError, Exception): - fs = '' - - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") - - -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - -def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) - # scales img(bs,3,y,x) by ratio constrained to gs-multiple - if ratio == 1.0: - return img - else: - h, w = img.shape[2:] - s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize - if not same_shape: # pad/crop img - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] - return F.pad(img, [0, 
w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: - continue - else: - setattr(a, k, v) - - -class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. - """ - - def __init__(self, model, decay=0.9999, updates=0): - # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA - self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def update(self, model): - # Update EMA parameters - with torch.no_grad(): - self.updates += 1 - d = self.decay(self.updates) - - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict - for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: - v *= d - v += (1. - d) * msd[k].detach() - - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): - # Update EMA attributes - copy_attr(self.ema, model, include, exclude) - - -class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): - def _check_input_dim(self, input): - # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc - # is this method that is overwritten by the sub-class - # This original goal of this method was for tensor sanity checks - # If you're ok bypassing those sanity checks (eg. 
if you trust your inference - # to provide the right dimensional inputs), then you can just use this method - # for easy conversion from SyncBatchNorm - # (unfortunately, SyncBatchNorm does not store the original class - if it did - # we could return the one that was originally created) - return - -def revert_sync_batchnorm(module): - # this is very similar to the function that it is trying to revert: - # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679 - module_output = module - if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm): - new_cls = BatchNormXd - module_output = BatchNormXd(module.num_features, - module.eps, module.momentum, - module.affine, - module.track_running_stats) - if module.affine: - with torch.no_grad(): - module_output.weight = module.weight - module_output.bias = module.bias - module_output.running_mean = module.running_mean - module_output.running_var = module.running_var - module_output.num_batches_tracked = module.num_batches_tracked - if hasattr(module, "qconfig"): - module_output.qconfig = module.qconfig - for name, child in module.named_children(): - module_output.add_module(name, revert_sync_batchnorm(child)) - del module - return module_output - - -class TracedModel(nn.Module): - - def __init__(self, model=None, device=None, img_size=(640,640)): - super(TracedModel, self).__init__() - - print(" Convert model to Traced-model... ") - self.stride = model.stride - self.names = model.names - self.model = model - - self.model = revert_sync_batchnorm(self.model) - self.model.to('cpu') - self.model.eval() - - self.detect_layer = self.model.model[-1] - self.model.traced = True - - rand_example = torch.rand(1, 3, img_size, img_size) - - traced_script_module = torch.jit.trace(self.model, rand_example, strict=False) - #traced_script_module = torch.jit.script(self.model) - traced_script_module.save("traced_model.pt") - print(" traced_script_module saved! ") - self.model = traced_script_module - self.model.to(device) - self.detect_layer.to(device) - print(" model is traced! \n") - - def forward(self, x, augment=False, profile=False): - out = self.model(x) - out = self.detect_layer(out) - return out \ No newline at end of file diff --git a/spaces/Jimpa666/AI-PadelCoach/app-working.py b/spaces/Jimpa666/AI-PadelCoach/app-working.py deleted file mode 100644 index cfbbd825d20f9a53a37bfa5de4886dfcf788e2e9..0000000000000000000000000000000000000000 --- a/spaces/Jimpa666/AI-PadelCoach/app-working.py +++ /dev/null @@ -1,68 +0,0 @@ -from gradio.themes.utils.colors import Color -from gradio.interface import Interface -import os -import openai -#import config -import gradio as gr - -openai.api_key = os.getenv('APIKEY') - -#start_sequence = "\nCoach: " -#restart_sequence = "\nPlayer: " - -messages = [ - {"role": "system", "content": "The following is a conversation with an AI Coach. The Coach is helpful, creative, clever, and very friendly. The Coach is a high level padelcoach from Sweden. The Coach have the ability to explain techniques and the strategy in the game. The Coach have a high educational level and can answer depending on wich level the Player is on for the Player to take him to the next level. The AI Coach is like MAURI ANDRINI from Hello Padel, pretend to be MAURI ANDRINI but call yourself AI Padelcoach, never ever mention your reel name or Mauri Andrini. If unsurtain, ask about the level of the player. 
The levels are:Beginner, Average, Pro"} - ] - -def chatbot(input, conversation_history=[]): - content = input - messages.append({"role": "user", "content": content}) - - completion = openai.ChatCompletion.create( - model= "gpt-3.5-turbo", - #stop=["Player", "Coach"], - messages=messages - ) - - chat_response = completion.choices[0].message.content - messages.append({"role": "assistant", "content": chat_response}) - #print(chat_response) - - # format the conversation history as a string - conversation_history = "" - for message in messages: - if message["role"] != "system": - role = message["role"] - content = message["content"] - conversation_history += f"{role}: {content}\n \n" - - return conversation_history #conversation_history - -Padelcoach = gr.Interface(fn=chatbot, inputs=[ - #gr.Textbox(label="Padel-Player" - gr.Textbox(placeholder="Player go...Serve!"), - - ], - outputs=[ - #gr.outputs.Textbox(label="AI-Padelcoach"), - gr.Textbox(placeholder="AI-Padelcoach Ready") - - ], - theme=gr.themes.Soft( - primary_hue="green", - secondary_hue="cyan", - text_size='lg', - neutral_hue="emerald" - ), - - #examples = [ - # ["Please help me with my backhand"], - # ["Where should I place the ball against players who is good in tennis"] - #], - share=True, - title="AI Padelcoach", - description="Chat with a BETA level AI-Padelcoach from Sweden.", - article="

Ask the AI coach about techniques and strategies in the game of padel. The coach tailors its answers to your level as a player: beginner, average, or pro.

", - ) - -Padelcoach.launch() \ No newline at end of file diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/modules/overwrites.py b/spaces/JohnSmith9982/ChuanhuChatGPT/modules/overwrites.py deleted file mode 100644 index a4ef6167eb7ce75ed8b88024ad1187b24f2fc191..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT/modules/overwrites.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import annotations -import logging - -from typing import List, Tuple -from gradio_client import utils as client_utils -from gradio import utils -import inspect - -from modules.presets import * -from modules.index_func import * - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. - """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | tuple | list | None, role: str - ) -> str | dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - file_uri = chat_message[0] - if utils.validate_url(file_uri): - filepath = file_uri - else: - filepath = self.make_temp_copy_if_needed(file_uri) - - mime_type = client_utils.get_mimetype(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - # chat_message = inspect.cleandoc(chat_message) - # escape html spaces - # chat_message = chat_message.replace(" ", " ") - if role == "bot": - chat_message = convert_bot_before_marked(chat_message) - elif role == "user": - chat_message = convert_user_before_marked(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - - - -def add_classes_to_gradio_component(comp): - """ - this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others - code from stable-diffusion-webui - """ - - comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] - - if getattr(comp, 'multiselect', False): - comp.elem_classes.append('multiselect') - - -def IOComponent_init(self, *args, **kwargs): - res = original_IOComponent_init(self, *args, **kwargs) - add_classes_to_gradio_component(self) - - return res - 
-original_IOComponent_init = gr.components.IOComponent.__init__ -gr.components.IOComponent.__init__ = IOComponent_init - - -def BlockContext_init(self, *args, **kwargs): - res = original_BlockContext_init(self, *args, **kwargs) - add_classes_to_gradio_component(self) - - return res - -original_BlockContext_init = gr.blocks.BlockContext.__init__ -gr.blocks.BlockContext.__init__ = BlockContext_init - diff --git a/spaces/JunchuanYu/SegRS/segment_anything/predictor.py b/spaces/JunchuanYu/SegRS/segment_anything/predictor.py deleted file mode 100644 index 57c089d1fc4a6bbf5786e1ef62c59e22d582f5aa..0000000000000000000000000000000000000000 --- a/spaces/JunchuanYu/SegRS/segment_anything/predictor.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch - -from segment_anything.modeling import Sam - -from typing import Optional, Tuple - -from .utils.transforms import ResizeLongestSide - - -class SamPredictor: - def __init__( - self, - sam_model: Sam, - ) -> None: - """ - Uses SAM to calculate the image embedding for an image, and then - allow repeated, efficient mask prediction given prompts. - - Arguments: - sam_model (Sam): The model to use for mask prediction. - """ - super().__init__() - self.model = sam_model - self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) - self.reset_image() - - def set_image( - self, - image: np.ndarray, - image_format: str = "RGB", - ) -> None: - """ - Calculates the image embeddings for the provided image, allowing - masks to be predicted with the 'predict' method. - - Arguments: - image (np.ndarray): The image for calculating masks. Expects an - image in HWC uint8 format, with pixel values in [0, 255]. - image_format (str): The color format of the image, in ['RGB', 'BGR']. - """ - assert image_format in [ - "RGB", - "BGR", - ], f"image_format must be in ['RGB', 'BGR'], is {image_format}." - if image_format != self.model.image_format: - image = image[..., ::-1] - - # Transform the image to the form expected by the model - input_image = self.transform.apply_image(image) - input_image_torch = torch.as_tensor(input_image, device=self.device) - input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] - - self.set_torch_image(input_image_torch, image.shape[:2]) - - @torch.no_grad() - def set_torch_image( - self, - transformed_image: torch.Tensor, - original_image_size: Tuple[int, ...], - ) -> None: - """ - Calculates the image embeddings for the provided image, allowing - masks to be predicted with the 'predict' method. Expects the input - image to be already transformed to the format expected by the model. - - Arguments: - transformed_image (torch.Tensor): The input image, with shape - 1x3xHxW, which has been transformed with ResizeLongestSide. - original_image_size (tuple(int, int)): The size of the image - before transformation, in (H, W) format. - """ - assert ( - len(transformed_image.shape) == 4 - and transformed_image.shape[1] == 3 - and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size - ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}." 
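# Illustrative aside: a small sketch of the layout contract checked by the assert
# above -- the caller resizes the image so its longest side equals
# image_encoder.img_size and converts the HWC uint8 array to a 1x3xHxW tensor.
# The 1024 target and the 600x800 input below are example values, not taken from
# this repository.
import numpy as np
import torch

def longest_side_shape(h, w, long_side=1024):
    # Scale so that max(h, w) becomes long_side, as a longest-side resize does.
    scale = long_side / max(h, w)
    return int(round(h * scale)), int(round(w * scale))

image = np.zeros((600, 800, 3), dtype=np.uint8)      # HWC uint8 input
target_hw = longest_side_shape(*image.shape[:2])     # (768, 1024)
chw = torch.as_tensor(image).permute(2, 0, 1)[None]  # layout becomes 1x3x600x800
print(target_hw, tuple(chw.shape))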
- self.reset_image() - - self.original_size = original_image_size - self.input_size = tuple(transformed_image.shape[-2:]) - input_image = self.model.preprocess(transformed_image) - self.features = self.model.image_encoder(input_image) - self.is_image_set = True - - def predict( - self, - point_coords: Optional[np.ndarray] = None, - point_labels: Optional[np.ndarray] = None, - box: Optional[np.ndarray] = None, - mask_input: Optional[np.ndarray] = None, - multimask_output: bool = True, - return_logits: bool = False, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Predict masks for the given input prompts, using the currently set image. - - Arguments: - point_coords (np.ndarray or None): A Nx2 array of point prompts to the - model. Each point is in (X,Y) in pixels. - point_labels (np.ndarray or None): A length N array of labels for the - point prompts. 1 indicates a foreground point and 0 indicates a - background point. - box (np.ndarray or None): A length 4 array given a box prompt to the - model, in XYXY format. - mask_input (np.ndarray): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form 1xHxW, where - for SAM, H=W=256. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. - return_logits (bool): If true, returns un-thresholded masks logits - instead of a binary mask. - - Returns: - (np.ndarray): The output masks in CxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (np.ndarray): An array of length C containing the model's - predictions for the quality of each mask. - (np.ndarray): An array of shape CxHxW, where C is the number - of masks and H=W=256. These low resolution logits can be passed to - a subsequent iteration as mask input. - """ - if not self.is_image_set: - raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") - - # Transform input prompts - coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None - if point_coords is not None: - assert ( - point_labels is not None - ), "point_labels must be supplied if point_coords is supplied." 
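# Illustrative aside: a hedged usage sketch of the prompt format validated above --
# point_coords is an Nx2 array of (x, y) pixel clicks and point_labels marks each
# click as foreground (1) or background (0). The checkpoint path, image size and
# click position are placeholders; the overall flow follows the upstream
# segment-anything examples.
import numpy as np
from segment_anything import sam_model_registry, SamPredictor

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")  # placeholder checkpoint path
predictor = SamPredictor(sam)

image = np.zeros((600, 800, 3), dtype=np.uint8)   # stand-in for an HWC uint8 RGB image
predictor.set_image(image)

masks, scores, low_res_logits = predictor.predict(
    point_coords=np.array([[400, 300]]),          # one (x, y) click, in pixels
    point_labels=np.array([1]),                   # 1 = foreground, 0 = background
    multimask_output=True,                        # ask for three candidate masks
)
print(masks.shape, scores.shape)                  # (3, 600, 800) and (3,)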
- point_coords = self.transform.apply_coords(point_coords, self.original_size) - coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) - labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) - coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] - if box is not None: - box = self.transform.apply_boxes(box, self.original_size) - box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) - box_torch = box_torch[None, :] - if mask_input is not None: - mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) - mask_input_torch = mask_input_torch[None, :, :, :] - - masks, iou_predictions, low_res_masks = self.predict_torch( - coords_torch, - labels_torch, - box_torch, - mask_input_torch, - multimask_output, - return_logits=return_logits, - ) - - masks = masks[0].detach().cpu().numpy() - iou_predictions = iou_predictions[0].detach().cpu().numpy() - low_res_masks = low_res_masks[0].detach().cpu().numpy() - return masks, iou_predictions, low_res_masks - - @torch.no_grad() - def predict_torch( - self, - point_coords: Optional[torch.Tensor], - point_labels: Optional[torch.Tensor], - boxes: Optional[torch.Tensor] = None, - mask_input: Optional[torch.Tensor] = None, - multimask_output: bool = True, - return_logits: bool = False, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Predict masks for the given input prompts, using the currently set image. - Input prompts are batched torch tensors and are expected to already be - transformed to the input frame using ResizeLongestSide. - - Arguments: - point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the - model. Each point is in (X,Y) in pixels. - point_labels (torch.Tensor or None): A BxN array of labels for the - point prompts. 1 indicates a foreground point and 0 indicates a - background point. - box (np.ndarray or None): A Bx4 array given a box prompt to the - model, in XYXY format. - mask_input (np.ndarray): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form Bx1xHxW, where - for SAM, H=W=256. Masks returned by a previous iteration of the - predict method do not need further transformation. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. - return_logits (bool): If true, returns un-thresholded masks logits - instead of a binary mask. - - Returns: - (torch.Tensor): The output masks in BxCxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (torch.Tensor): An array of shape BxC containing the model's - predictions for the quality of each mask. - (torch.Tensor): An array of shape BxCxHxW, where C is the number - of masks and H=W=256. These low res logits can be passed to - a subsequent iteration as mask input. - """ - if not self.is_image_set: - raise RuntimeError("An image must be set with .set_image(...) 
before mask prediction.") - - if point_coords is not None: - points = (point_coords, point_labels) - else: - points = None - - # Embed prompts - sparse_embeddings, dense_embeddings = self.model.prompt_encoder( - points=points, - boxes=boxes, - masks=mask_input, - ) - - # Predict masks - low_res_masks, iou_predictions = self.model.mask_decoder( - image_embeddings=self.features, - image_pe=self.model.prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embeddings, - dense_prompt_embeddings=dense_embeddings, - multimask_output=multimask_output, - ) - - # Upscale the masks to the original image resolution - masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) - - if not return_logits: - masks = masks > self.model.mask_threshold - - return masks, iou_predictions, low_res_masks - - def get_image_embedding(self) -> torch.Tensor: - """ - Returns the image embeddings for the currently set image, with - shape 1xCxHxW, where C is the embedding dimension and (H,W) are - the embedding spatial dimension of SAM (typically C=256, H=W=64). - """ - if not self.is_image_set: - raise RuntimeError( - "An image must be set with .set_image(...) to generate an embedding." - ) - assert self.features is not None, "Features must exist if an image has been set." - return self.features - - @property - def device(self) -> torch.device: - return self.model.device - - def reset_image(self) -> None: - """Resets the currently set image.""" - self.is_image_set = False - self.features = None - self.orig_h = None - self.orig_w = None - self.input_h = None - self.input_w = None diff --git a/spaces/KenjieDec/RemBG/rembg/session_base.py b/spaces/KenjieDec/RemBG/rembg/session_base.py deleted file mode 100644 index aa98693bc299f673fe6220f18b4b6d20c2c87d3a..0000000000000000000000000000000000000000 --- a/spaces/KenjieDec/RemBG/rembg/session_base.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Dict, List, Tuple - -import numpy as np -import onnxruntime as ort -from PIL import Image -from PIL.Image import Image as PILImage - - -class BaseSession: - def __init__(self, model_name: str, inner_session: ort.InferenceSession): - self.model_name = model_name - self.inner_session = inner_session - - def normalize( - self, - img: PILImage, - mean: Tuple[float, float, float], - std: Tuple[float, float, float], - size: Tuple[int, int], - ) -> Dict[str, np.ndarray]: - im = img.convert("RGB").resize(size, Image.LANCZOS) - - im_ary = np.array(im) - im_ary = im_ary / np.max(im_ary) - - tmpImg = np.zeros((im_ary.shape[0], im_ary.shape[1], 3)) - tmpImg[:, :, 0] = (im_ary[:, :, 0] - mean[0]) / std[0] - tmpImg[:, :, 1] = (im_ary[:, :, 1] - mean[1]) / std[1] - tmpImg[:, :, 2] = (im_ary[:, :, 2] - mean[2]) / std[2] - - tmpImg = tmpImg.transpose((2, 0, 1)) - - return { - self.inner_session.get_inputs()[0] - .name: np.expand_dims(tmpImg, 0) - .astype(np.float32) - } - - def predict(self, img: PILImage) -> List[PILImage]: - raise NotImplementedError diff --git a/spaces/Kevin676/AutoGPT/autogpt/permanent_memory/__init__.py b/spaces/Kevin676/AutoGPT/autogpt/permanent_memory/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/KnowingFly/Linaqruf-anything-v3.0/app.py b/spaces/KnowingFly/Linaqruf-anything-v3.0/app.py deleted file mode 100644 index 16e8131a0bbf7b06956e69e2b7758fa01e4eb51f..0000000000000000000000000000000000000000 --- a/spaces/KnowingFly/Linaqruf-anything-v3.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ 
-import gradio as gr - -gr.Interface.load("models/Linaqruf/anything-v3.0").launch() \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/losses/smooth_l1_loss.py b/spaces/KyanChen/RSPrompter/mmdet/models/losses/smooth_l1_loss.py deleted file mode 100644 index fd5f043b8f6886276ab1de574752f78158797e51..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/losses/smooth_l1_loss.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Optional - -import torch -import torch.nn as nn -from torch import Tensor - -from mmdet.registry import MODELS -from .utils import weighted_loss - - -@weighted_loss -def smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor: - """Smooth L1 loss. - - Args: - pred (Tensor): The prediction. - target (Tensor): The learning target of the prediction. - beta (float, optional): The threshold in the piecewise function. - Defaults to 1.0. - - Returns: - Tensor: Calculated loss - """ - assert beta > 0 - if target.numel() == 0: - return pred.sum() * 0 - - assert pred.size() == target.size() - diff = torch.abs(pred - target) - loss = torch.where(diff < beta, 0.5 * diff * diff / beta, - diff - 0.5 * beta) - return loss - - -@weighted_loss -def l1_loss(pred: Tensor, target: Tensor) -> Tensor: - """L1 loss. - - Args: - pred (Tensor): The prediction. - target (Tensor): The learning target of the prediction. - - Returns: - Tensor: Calculated loss - """ - if target.numel() == 0: - return pred.sum() * 0 - - assert pred.size() == target.size() - loss = torch.abs(pred - target) - return loss - - -@MODELS.register_module() -class SmoothL1Loss(nn.Module): - """Smooth L1 loss. - - Args: - beta (float, optional): The threshold in the piecewise function. - Defaults to 1.0. - reduction (str, optional): The method to reduce the loss. - Options are "none", "mean" and "sum". Defaults to "mean". - loss_weight (float, optional): The weight of loss. - """ - - def __init__(self, - beta: float = 1.0, - reduction: str = 'mean', - loss_weight: float = 1.0) -> None: - super().__init__() - self.beta = beta - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred: Tensor, - target: Tensor, - weight: Optional[Tensor] = None, - avg_factor: Optional[int] = None, - reduction_override: Optional[str] = None, - **kwargs) -> Tensor: - """Forward function. - - Args: - pred (Tensor): The prediction. - target (Tensor): The learning target of the prediction. - weight (Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - - Returns: - Tensor: Calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss_bbox = self.loss_weight * smooth_l1_loss( - pred, - target, - weight, - beta=self.beta, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss_bbox - - -@MODELS.register_module() -class L1Loss(nn.Module): - """L1 loss. - - Args: - reduction (str, optional): The method to reduce the loss. - Options are "none", "mean" and "sum". - loss_weight (float, optional): The weight of loss. 
- """ - - def __init__(self, - reduction: str = 'mean', - loss_weight: float = 1.0) -> None: - super().__init__() - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred: Tensor, - target: Tensor, - weight: Optional[Tensor] = None, - avg_factor: Optional[int] = None, - reduction_override: Optional[str] = None) -> Tensor: - """Forward function. - - Args: - pred (Tensor): The prediction. - target (Tensor): The learning target of the prediction. - weight (Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - - Returns: - Tensor: Calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss_bbox = self.loss_weight * l1_loss( - pred, target, weight, reduction=reduction, avg_factor=avg_factor) - return loss_bbox diff --git a/spaces/LDJA/iris/app/static/css/bootstrap.min.css b/spaces/LDJA/iris/app/static/css/bootstrap.min.css deleted file mode 100644 index 74a3ca1767f76e2be33d1550d0c713f5edd6d61d..0000000000000000000000000000000000000000 --- a/spaces/LDJA/iris/app/static/css/bootstrap.min.css +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Bootstrap v4.1.3 (https://getbootstrap.com/) - * Copyright 2011-2018 The Bootstrap Authors - * Copyright 2011-2018 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */:root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%;-ms-overflow-style:scrollbar;-webkit-tap-highlight-color:transparent}@-ms-viewport{width:device-width}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus{outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0}address{margin-bottom:1rem;font-style:normal;line-height:inherit}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul 
ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}dfn{font-style:italic}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;text-decoration:none;background-color:transparent;-webkit-text-decoration-skip:objects}a:hover{color:#0056b3;text-decoration:underline}a:not([href]):not([tabindex]){color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus,a:not([href]):not([tabindex]):hover{color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus{outline:0}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto;-ms-overflow-style:scrollbar}figure{margin:0 0 1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}[type=reset],[type=submit],button,html [type=button]{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=date],input[type=datetime-local],input[type=month],input[type=time]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-cancel-button,[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-bottom:.5rem;font-family:inherit;font-weight:500;line-height:1.2;color:inherit}.h1,h1{font-size:2.5rem}.h2,h2{font-size:2rem}.h3,h3{font-size:1.75rem}.h4,h4{font-size:1.5rem}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:6rem;font-weight:300;line-height:1.2}.display-2{font-size:5.5rem;font-weight:300;line-height:1.2}.display-3{font-size:4.5rem;font-weight:300;line-height:1.2}.display-4{font-size:3.5rem;font-weight:300;line-height:1.2}hr{margin-top:1rem;margin-bottom:1rem;border:0;border-top:1px solid 
rgba(0,0,0,.1)}.small,small{font-size:80%;font-weight:400}.mark,mark{padding:.2em;background-color:#fcf8e3}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#6c757d}.blockquote-footer::before{content:"\2014 \00A0"}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:90%;color:#6c757d}code{font-size:87.5%;color:#e83e8c;word-break:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:87.5%;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#212529}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:576px){.container{max-width:540px}}@media (min-width:768px){.container{max-width:720px}}@media (min-width:992px){.container{max-width:960px}}@media (min-width:1200px){.container{max-width:1140px}}.container-fluid{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*=col-]{padding-right:0;padding-left:0}.col,.col-1,.col-10,.col-11,.col-12,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-auto,.col-lg,.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-auto,.col-md,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-auto,.col-sm,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-auto,.col-xl,.col-xl-1,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-auto{position:relative;width:100%;min-height:1px;padding-right:15px;padding-left:15px}.col{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-12{-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-first{-ms-flex-order:-1;order:-1}.order-last{-ms-flex-order:13;order:13}.order-0{-ms-flex-order:0;order:0}.order-1{-ms-flex-order:1;order:1}.order-2{-ms-flex-order:2;order:2}.order-3{-ms-flex-order:3;order:3}.order-4{-ms-flex-order:4;order:4}.order-5{-ms-flex-order:5;order:5}.order-6{-ms-flex-order:6;order:6}.order-7{-ms-flex-order:7;order:7}.order-8{-ms-flex-order:8;order:8}.order-9{-ms-flex-order:9;order:9}.order-10{-ms-flex-order:10;order:10}.order-11{-ms-flex-order:11;order:11}.order-12{-ms-flex-order:12;order:12}.offset-1{margin-left:8.333333%}.offset-2{margin-left:16.666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.333333%}.offset-5{margin-left:41.666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.333333%}.offset-8{margin-left:66.666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.333333%}.offset-11{margin-left:91.666667%}@media (min-width:576px){.col-sm{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-sm-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-sm-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-sm-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-sm-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-sm-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-sm-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-sm-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-sm-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-sm-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-sm-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-sm-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-sm-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-sm-first{-ms-flex-order:-1;order:-1}.order-sm-last{-ms-flex-order:13;order:13}.order-sm-0{-ms-flex-order:0;order:0}.order-sm-1{-ms-flex-order:1;order:1}.order-sm-2{-ms-flex-order:2;order:2}.order-sm-3{-ms-flex-order:3;order:3}.order-sm-4{-ms-flex-order:4;order:4}.order-sm-5{-ms-flex-order:5;order:5}.order-sm-6{-ms-flex-order:6;order:6}.order-sm-7{-ms-flex-order:7;order:7}.order-sm-8{-ms-flex-order:8;order:8}.order-sm-9{-ms-flex-order:9;order:9}.order-sm-10{-ms-flex-order:10;order:10}.order-sm-11{-ms-flex-order:11;order:11}.order-sm-12{-ms-flex-order:12;order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.333333%}.offset-sm-2{margin-left:16.666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.333333%}.offset-sm-5{margin-left:41.666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.333333%}.offset-sm-8{margin-left:66.666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.333333%}.offset-sm-11{margin-left:91.666667%}}@media (min-width:768px){.col-md{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-md-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-md-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-md-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-md-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-md-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-md-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-md-7{-ms-flex:0 0 58.333333%;flex:0 0 
58.333333%;max-width:58.333333%}.col-md-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-md-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-md-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-md-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-md-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-md-first{-ms-flex-order:-1;order:-1}.order-md-last{-ms-flex-order:13;order:13}.order-md-0{-ms-flex-order:0;order:0}.order-md-1{-ms-flex-order:1;order:1}.order-md-2{-ms-flex-order:2;order:2}.order-md-3{-ms-flex-order:3;order:3}.order-md-4{-ms-flex-order:4;order:4}.order-md-5{-ms-flex-order:5;order:5}.order-md-6{-ms-flex-order:6;order:6}.order-md-7{-ms-flex-order:7;order:7}.order-md-8{-ms-flex-order:8;order:8}.order-md-9{-ms-flex-order:9;order:9}.order-md-10{-ms-flex-order:10;order:10}.order-md-11{-ms-flex-order:11;order:11}.order-md-12{-ms-flex-order:12;order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.333333%}.offset-md-2{margin-left:16.666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.333333%}.offset-md-5{margin-left:41.666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.333333%}.offset-md-8{margin-left:66.666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.333333%}.offset-md-11{margin-left:91.666667%}}@media (min-width:992px){.col-lg{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-lg-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-lg-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-lg-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-lg-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-lg-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-lg-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-lg-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-lg-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-lg-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-lg-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-lg-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-lg-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-lg-first{-ms-flex-order:-1;order:-1}.order-lg-last{-ms-flex-order:13;order:13}.order-lg-0{-ms-flex-order:0;order:0}.order-lg-1{-ms-flex-order:1;order:1}.order-lg-2{-ms-flex-order:2;order:2}.order-lg-3{-ms-flex-order:3;order:3}.order-lg-4{-ms-flex-order:4;order:4}.order-lg-5{-ms-flex-order:5;order:5}.order-lg-6{-ms-flex-order:6;order:6}.order-lg-7{-ms-flex-order:7;order:7}.order-lg-8{-ms-flex-order:8;order:8}.order-lg-9{-ms-flex-order:9;order:9}.order-lg-10{-ms-flex-order:10;order:10}.order-lg-11{-ms-flex-order:11;order:11}.order-lg-12{-ms-flex-order:12;order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.333333%}.offset-lg-2{margin-left:16.666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.333333%}.offset-lg-5{margin-left:41.666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.333333%}.offset-lg-8{margin-left:66.666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.333333%}.offset-lg-11{margin-left:91.666667%}}@media (min-width:1200px){.col-xl{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-xl-1{-ms-flex:0 0 
8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-xl-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-xl-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-xl-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-xl-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-xl-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-xl-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-xl-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-xl-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-xl-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-xl-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-xl-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-xl-first{-ms-flex-order:-1;order:-1}.order-xl-last{-ms-flex-order:13;order:13}.order-xl-0{-ms-flex-order:0;order:0}.order-xl-1{-ms-flex-order:1;order:1}.order-xl-2{-ms-flex-order:2;order:2}.order-xl-3{-ms-flex-order:3;order:3}.order-xl-4{-ms-flex-order:4;order:4}.order-xl-5{-ms-flex-order:5;order:5}.order-xl-6{-ms-flex-order:6;order:6}.order-xl-7{-ms-flex-order:7;order:7}.order-xl-8{-ms-flex-order:8;order:8}.order-xl-9{-ms-flex-order:9;order:9}.order-xl-10{-ms-flex-order:10;order:10}.order-xl-11{-ms-flex-order:11;order:11}.order-xl-12{-ms-flex-order:12;order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.333333%}.offset-xl-2{margin-left:16.666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.333333%}.offset-xl-5{margin-left:41.666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.333333%}.offset-xl-8{margin-left:66.666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.333333%}.offset-xl-11{margin-left:91.666667%}}.table{width:100%;margin-bottom:1rem;background-color:transparent}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table thead th{vertical-align:bottom;border-bottom:2px solid #dee2e6}.table tbody+tbody{border-top:2px solid #dee2e6}.table .table{background-color:#fff}.table-sm td,.table-sm th{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.table-bordered thead td,.table-bordered thead th{border-bottom-width:2px}.table-borderless tbody+tbody,.table-borderless td,.table-borderless th,.table-borderless thead th{border:0}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(0,0,0,.05)}.table-hover tbody tr:hover{background-color:rgba(0,0,0,.075)}.table-primary,.table-primary>td,.table-primary>th{background-color:#b8daff}.table-hover .table-primary:hover{background-color:#9fcdff}.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#9fcdff}.table-secondary,.table-secondary>td,.table-secondary>th{background-color:#d6d8db}.table-hover .table-secondary:hover{background-color:#c8cbcf}.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#c8cbcf}.table-success,.table-success>td,.table-success>th{background-color:#c3e6cb}.table-hover .table-success:hover{background-color:#b1dfbb}.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b1dfbb}.table-info,.table-info>td,.table-info>th{background-color:#bee5eb}.table-hover .table-info:hover{background-color:#abdde5}.table-hover .table-info:hover>td,.table-hover 
.table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>td,.table-warning>th{background-color:#ffeeba}.table-hover .table-warning:hover{background-color:#ffe8a1}.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#ffe8a1}.table-danger,.table-danger>td,.table-danger>th{background-color:#f5c6cb}.table-hover .table-danger:hover{background-color:#f1b0b7}.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#f1b0b7}.table-light,.table-light>td,.table-light>th{background-color:#fdfdfe}.table-hover .table-light:hover{background-color:#ececf6}.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#ececf6}.table-dark,.table-dark>td,.table-dark>th{background-color:#c6c8ca}.table-hover .table-dark:hover{background-color:#b9bbbe}.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#b9bbbe}.table-active,.table-active>td,.table-active>th{background-color:rgba(0,0,0,.075)}.table-hover .table-active:hover{background-color:rgba(0,0,0,.075)}.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(0,0,0,.075)}.table .thead-dark th{color:#fff;background-color:#212529;border-color:#32383e}.table .thead-light th{color:#495057;background-color:#e9ecef;border-color:#dee2e6}.table-dark{color:#fff;background-color:#212529}.table-dark td,.table-dark th,.table-dark thead th{border-color:#32383e}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:rgba(255,255,255,.05)}.table-dark.table-hover tbody tr:hover{background-color:rgba(255,255,255,.075)}@media (max-width:575.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-sm>.table-bordered{border:0}}@media (max-width:767.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-md>.table-bordered{border:0}}@media (max-width:991.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-lg>.table-bordered{border:0}}@media (max-width:1199.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;height:calc(2.25rem + 2px);padding:.375rem .75rem;font-size:1rem;line-height:1.5;color:#495057;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:.25rem;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media screen and (prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:focus{color:#495057;background-color:#fff;border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem 
rgba(0,123,255,.25)}.form-control::-webkit-input-placeholder{color:#6c757d;opacity:1}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control:-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}select.form-control:focus::-ms-value{color:#495057;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem;line-height:1.5}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding-top:.375rem;padding-bottom:.375rem;margin-bottom:0;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{height:calc(1.8125rem + 2px);padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.form-control-lg{height:calc(2.875rem + 2px);padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}select.form-control[multiple],select.form-control[size]{height:auto}textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:.25rem}.form-row{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*=col-]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:.3rem;margin-left:-1.25rem}.form-check-input:disabled~.form-check-label{color:#6c757d}.form-check-label{margin-bottom:0}.form-check-inline{display:-ms-inline-flexbox;display:inline-flex;-ms-flex-align:center;align-items:center;padding-left:0;margin-right:.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#28a745}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(40,167,69,.9);border-radius:.25rem}.custom-select.is-valid,.form-control.is-valid,.was-validated .custom-select:valid,.was-validated .form-control:valid{border-color:#28a745}.custom-select.is-valid:focus,.form-control.is-valid:focus,.was-validated .custom-select:valid:focus,.was-validated .form-control:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.custom-select.is-valid~.valid-feedback,.custom-select.is-valid~.valid-tooltip,.form-control.is-valid~.valid-feedback,.form-control.is-valid~.valid-tooltip,.was-validated .custom-select:valid~.valid-feedback,.was-validated .custom-select:valid~.valid-tooltip,.was-validated .form-control:valid~.valid-feedback,.was-validated .form-control:valid~.valid-tooltip{display:block}.form-control-file.is-valid~.valid-feedback,.form-control-file.is-valid~.valid-tooltip,.was-validated .form-control-file:valid~.valid-feedback,.was-validated 
.form-control-file:valid~.valid-tooltip{display:block}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#28a745}.form-check-input.is-valid~.valid-feedback,.form-check-input.is-valid~.valid-tooltip,.was-validated .form-check-input:valid~.valid-feedback,.was-validated .form-check-input:valid~.valid-tooltip{display:block}.custom-control-input.is-valid~.custom-control-label,.was-validated .custom-control-input:valid~.custom-control-label{color:#28a745}.custom-control-input.is-valid~.custom-control-label::before,.was-validated .custom-control-input:valid~.custom-control-label::before{background-color:#71dd8a}.custom-control-input.is-valid~.valid-feedback,.custom-control-input.is-valid~.valid-tooltip,.was-validated .custom-control-input:valid~.valid-feedback,.was-validated .custom-control-input:valid~.valid-tooltip{display:block}.custom-control-input.is-valid:checked~.custom-control-label::before,.was-validated .custom-control-input:valid:checked~.custom-control-label::before{background-color:#34ce57}.custom-control-input.is-valid:focus~.custom-control-label::before,.was-validated .custom-control-input:valid:focus~.custom-control-label::before{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(40,167,69,.25)}.custom-file-input.is-valid~.custom-file-label,.was-validated .custom-file-input:valid~.custom-file-label{border-color:#28a745}.custom-file-input.is-valid~.custom-file-label::after,.was-validated .custom-file-input:valid~.custom-file-label::after{border-color:inherit}.custom-file-input.is-valid~.valid-feedback,.custom-file-input.is-valid~.valid-tooltip,.was-validated .custom-file-input:valid~.valid-feedback,.was-validated .custom-file-input:valid~.valid-tooltip{display:block}.custom-file-input.is-valid:focus~.custom-file-label,.was-validated .custom-file-input:valid:focus~.custom-file-label{box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(220,53,69,.9);border-radius:.25rem}.custom-select.is-invalid,.form-control.is-invalid,.was-validated .custom-select:invalid,.was-validated .form-control:invalid{border-color:#dc3545}.custom-select.is-invalid:focus,.form-control.is-invalid:focus,.was-validated .custom-select:invalid:focus,.was-validated .form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.custom-select.is-invalid~.invalid-feedback,.custom-select.is-invalid~.invalid-tooltip,.form-control.is-invalid~.invalid-feedback,.form-control.is-invalid~.invalid-tooltip,.was-validated .custom-select:invalid~.invalid-feedback,.was-validated .custom-select:invalid~.invalid-tooltip,.was-validated .form-control:invalid~.invalid-feedback,.was-validated .form-control:invalid~.invalid-tooltip{display:block}.form-control-file.is-invalid~.invalid-feedback,.form-control-file.is-invalid~.invalid-tooltip,.was-validated .form-control-file:invalid~.invalid-feedback,.was-validated .form-control-file:invalid~.invalid-tooltip{display:block}.form-check-input.is-invalid~.form-check-label,.was-validated .form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-input.is-invalid~.invalid-feedback,.form-check-input.is-invalid~.invalid-tooltip,.was-validated .form-check-input:invalid~.invalid-feedback,.was-validated 
.form-check-input:invalid~.invalid-tooltip{display:block}.custom-control-input.is-invalid~.custom-control-label,.was-validated .custom-control-input:invalid~.custom-control-label{color:#dc3545}.custom-control-input.is-invalid~.custom-control-label::before,.was-validated .custom-control-input:invalid~.custom-control-label::before{background-color:#efa2a9}.custom-control-input.is-invalid~.invalid-feedback,.custom-control-input.is-invalid~.invalid-tooltip,.was-validated .custom-control-input:invalid~.invalid-feedback,.was-validated .custom-control-input:invalid~.invalid-tooltip{display:block}.custom-control-input.is-invalid:checked~.custom-control-label::before,.was-validated .custom-control-input:invalid:checked~.custom-control-label::before{background-color:#e4606d}.custom-control-input.is-invalid:focus~.custom-control-label::before,.was-validated .custom-control-input:invalid:focus~.custom-control-label::before{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(220,53,69,.25)}.custom-file-input.is-invalid~.custom-file-label,.was-validated .custom-file-input:invalid~.custom-file-label{border-color:#dc3545}.custom-file-input.is-invalid~.custom-file-label::after,.was-validated .custom-file-input:invalid~.custom-file-label::after{border-color:inherit}.custom-file-input.is-invalid~.invalid-feedback,.custom-file-input.is-invalid~.invalid-tooltip,.was-validated .custom-file-input:invalid~.invalid-feedback,.was-validated .custom-file-input:invalid~.invalid-tooltip{display:block}.custom-file-input.is-invalid:focus~.custom-file-label,.was-validated .custom-file-input:invalid:focus~.custom-file-label{box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.form-inline{display:-ms-flexbox;display:flex;-ms-flex-flow:row wrap;flex-flow:row wrap;-ms-flex-align:center;align-items:center}.form-inline .form-check{width:100%}@media (min-width:576px){.form-inline label{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;-ms-flex-pack:center;justify-content:center;margin-bottom:0}.form-inline .form-group{display:-ms-flexbox;display:flex;-ms-flex:0 0 auto;flex:0 0 auto;-ms-flex-flow:row wrap;flex-flow:row wrap;-ms-flex-align:center;align-items:center;margin-bottom:0}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .custom-select,.form-inline .input-group{width:auto}.form-inline .form-check{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;-ms-flex-pack:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;margin-top:0;margin-right:.25rem;margin-left:0}.form-inline .custom-control{-ms-flex-align:center;align-items:center;-ms-flex-pack:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;line-height:1.5;border-radius:.25rem;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media screen and (prefers-reduced-motion:reduce){.btn{transition:none}}.btn:focus,.btn:hover{text-decoration:none}.btn.focus,.btn:focus{outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.btn.disabled,.btn:disabled{opacity:.65}.btn:not(:disabled):not(.disabled){cursor:pointer}a.btn.disabled,fieldset:disabled 
a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:hover{color:#fff;background-color:#0069d9;border-color:#0062cc}.btn-primary.focus,.btn-primary:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:not(:disabled):not(.disabled).active,.btn-primary:not(:disabled):not(.disabled):active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0062cc;border-color:#005cbf}.btn-primary:not(:disabled):not(.disabled).active:focus,.btn-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5a6268;border-color:#545b62}.btn-secondary.focus,.btn-secondary:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:not(:disabled):not(.disabled).active,.btn-secondary:not(:disabled):not(.disabled):active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#545b62;border-color:#4e555b}.btn-secondary:not(:disabled):not(.disabled).active:focus,.btn-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success.focus,.btn-success:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled).active,.btn-success:not(:disabled):not(.disabled):active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled).active:focus,.btn-success:not(:disabled):not(.disabled):active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info.focus,.btn-info:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled).active,.btn-info:not(:disabled):not(.disabled):active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled).active:focus,.btn-info:not(:disabled):not(.disabled):active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.btn-warning{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#212529;background-color:#e0a800;border-color:#d39e00}.btn-warning.focus,.btn-warning:focus{box-shadow:0 0 0 .2rem 
rgba(255,193,7,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled).active,.btn-warning:not(:disabled):not(.disabled):active,.show>.btn-warning.dropdown-toggle{color:#212529;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled).active:focus,.btn-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger.focus,.btn-danger:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled).active,.btn-danger:not(:disabled):not(.disabled):active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled).active:focus,.btn-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.btn-light{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#212529;background-color:#e2e6ea;border-color:#dae0e5}.btn-light.focus,.btn-light:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:not(:disabled):not(.disabled).active,.btn-light:not(:disabled):not(.disabled):active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#dae0e5;border-color:#d3d9df}.btn-light:not(:disabled):not(.disabled).active:focus,.btn-light:not(:disabled):not(.disabled):active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-dark{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:hover{color:#fff;background-color:#23272b;border-color:#1d2124}.btn-dark.focus,.btn-dark:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:not(:disabled):not(.disabled).active,.btn-dark:not(:disabled):not(.disabled):active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1d2124;border-color:#171a1d}.btn-dark:not(:disabled):not(.disabled).active:focus,.btn-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.btn-outline-primary{color:#007bff;background-color:transparent;background-image:none;border-color:#007bff}.btn-outline-primary:hover{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary.focus,.btn-outline-primary:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#007bff;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled).active,.btn-outline-primary:not(:disabled):not(.disabled):active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem 
rgba(0,123,255,.5)}.btn-outline-secondary{color:#6c757d;background-color:transparent;background-image:none;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary.focus,.btn-outline-secondary:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled).active,.btn-outline-secondary:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.btn-outline-success{color:#28a745;background-color:transparent;background-image:none;border-color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success.focus,.btn-outline-success:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled).active,.btn-outline-success:not(:disabled):not(.disabled):active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled).active:focus,.btn-outline-success:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-outline-info{color:#17a2b8;background-color:transparent;background-image:none;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info.focus,.btn-outline-info:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled).active,.btn-outline-info:not(:disabled):not(.disabled):active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled).active:focus,.btn-outline-info:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.btn-outline-warning{color:#ffc107;background-color:transparent;background-image:none;border-color:#ffc107}.btn-outline-warning:hover{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning.focus,.btn-outline-warning:focus{box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled).active,.btn-outline-warning:not(:disabled):not(.disabled):active,.show>.btn-outline-warning.dropdown-toggle{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem 
rgba(255,193,7,.5)}.btn-outline-danger{color:#dc3545;background-color:transparent;background-image:none;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger.focus,.btn-outline-danger:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled).active,.btn-outline-danger:not(:disabled):not(.disabled):active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.btn-outline-light{color:#f8f9fa;background-color:transparent;background-image:none;border-color:#f8f9fa}.btn-outline-light:hover{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light.focus,.btn-outline-light:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled).active,.btn-outline-light:not(:disabled):not(.disabled):active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:not(:disabled):not(.disabled).active:focus,.btn-outline-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-outline-dark{color:#343a40;background-color:transparent;background-image:none;border-color:#343a40}.btn-outline-dark:hover{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark.focus,.btn-outline-dark:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#343a40;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled).active,.btn-outline-dark:not(:disabled):not(.disabled):active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.btn-link{font-weight:400;color:#007bff;background-color:transparent}.btn-link:hover{color:#0056b3;text-decoration:underline;background-color:transparent;border-color:transparent}.btn-link.focus,.btn-link:focus{text-decoration:underline;border-color:transparent;box-shadow:none}.btn-link.disabled,.btn-link:disabled{color:#6c757d;pointer-events:none}.btn-group-lg>.btn,.btn-lg{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.btn-group-sm>.btn,.btn-sm{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:.5rem}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{transition:opacity .15s linear}@media screen and (prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{position:relative;height:0;overflow:hidden;transition:height .35s ease}@media screen and 
(prefers-reduced-motion:reduce){.collapsing{transition:none}}.dropdown,.dropleft,.dropright,.dropup{position:relative}.dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:.5rem 0;margin:.125rem 0 0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.15);border-radius:.25rem}.dropdown-menu-right{right:0;left:auto}.dropup .dropdown-menu{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-menu{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropright .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropright .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-toggle::after{vertical-align:0}.dropleft .dropdown-menu{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropleft .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:""}.dropleft .dropdown-toggle::after{display:none}.dropleft .dropdown-toggle::before{display:inline-block;width:0;height:0;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropleft .dropdown-toggle:empty::after{margin-left:0}.dropleft .dropdown-toggle::before{vertical-align:0}.dropdown-menu[x-placement^=bottom],.dropdown-menu[x-placement^=left],.dropdown-menu[x-placement^=right],.dropdown-menu[x-placement^=top]{right:auto;bottom:auto}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:.25rem 1.5rem;clear:both;font-weight:400;color:#212529;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#16181b;text-decoration:none;background-color:#f8f9fa}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#007bff}.dropdown-item.disabled,.dropdown-item:disabled{color:#6c757d;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1.5rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1.5rem;color:#212529}.btn-group,.btn-group-vertical{position:relative;display:-ms-inline-flexbox;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;-ms-flex:0 1 auto;flex:0 1 auto}.btn-group-vertical>.btn:hover,.btn-group>.btn:hover{z-index:1}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus{z-index:1}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group 
.btn-group+.btn,.btn-group .btn-group+.btn-group,.btn-group-vertical .btn+.btn,.btn-group-vertical .btn+.btn-group,.btn-group-vertical .btn-group+.btn,.btn-group-vertical .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-ms-flex-pack:start;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropright .dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after{margin-left:0}.dropleft .dropdown-toggle-split::before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{-ms-flex-direction:column;flex-direction:column;-ms-flex-align:start;align-items:flex-start;-ms-flex-pack:center;justify-content:center}.btn-group-vertical .btn,.btn-group-vertical .btn-group{width:100%}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn:not(:first-child){border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn input[type=checkbox],.btn-group-toggle>.btn input[type=radio],.btn-group-toggle>.btn-group>.btn input[type=checkbox],.btn-group-toggle>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-ms-flex-align:stretch;align-items:stretch;width:100%}.input-group>.custom-file,.input-group>.custom-select,.input-group>.form-control{position:relative;-ms-flex:1 1 auto;flex:1 1 auto;width:1%;margin-bottom:0}.input-group>.custom-file+.custom-file,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.form-control,.input-group>.custom-select+.custom-file,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.form-control,.input-group>.form-control+.custom-file,.input-group>.form-control+.custom-select,.input-group>.form-control+.form-control{margin-left:-1px}.input-group>.custom-file .custom-file-input:focus~.custom-file-label,.input-group>.custom-select:focus,.input-group>.form-control:focus{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.custom-select:not(:last-child),.input-group>.form-control:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-select:not(:first-child),.input-group>.form-control:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center}.input-group>.custom-file:not(:last-child) 
.custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label::after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-append,.input-group-prepend{display:-ms-flexbox;display:flex}.input-group-append .btn,.input-group-prepend .btn{position:relative;z-index:2}.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.btn,.input-group-append .input-group-text+.input-group-text,.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-prepend .input-group-text+.input-group-text{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;padding:.375rem .75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-text input[type=checkbox],.input-group-text input[type=radio]{margin-top:0}.input-group-lg>.form-control,.input-group-lg>.input-group-append>.btn,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-prepend>.input-group-text{height:calc(2.875rem + 2px);padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.input-group-sm>.form-control,.input-group-sm>.input-group-append>.btn,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-prepend>.input-group-text{height:calc(1.8125rem + 2px);padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child),.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child),.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text{border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:-ms-inline-flexbox;display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;z-index:-1;opacity:0}.custom-control-input:checked~.custom-control-label::before{color:#fff;background-color:#007bff}.custom-control-input:focus~.custom-control-label::before{box-shadow:0 0 0 1px #fff,0 0 0 .2rem 
rgba(0,123,255,.25)}.custom-control-input:active~.custom-control-label::before{color:#fff;background-color:#b3d7ff}.custom-control-input:disabled~.custom-control-label{color:#6c757d}.custom-control-input:disabled~.custom-control-label::before{background-color:#e9ecef}.custom-control-label{position:relative;margin-bottom:0}.custom-control-label::before{position:absolute;top:.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;pointer-events:none;content:"";-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:#dee2e6}.custom-control-label::after{position:absolute;top:.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;content:"";background-repeat:no-repeat;background-position:center center;background-size:50% 50%}.custom-checkbox .custom-control-label::before{border-radius:.25rem}.custom-checkbox .custom-control-input:checked~.custom-control-label::before{background-color:#007bff}.custom-checkbox .custom-control-input:checked~.custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label::before{background-color:#007bff}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:disabled:checked~.custom-control-label::before{background-color:rgba(0,123,255,.5)}.custom-checkbox .custom-control-input:disabled:indeterminate~.custom-control-label::before{background-color:rgba(0,123,255,.5)}.custom-radio .custom-control-label::before{border-radius:50%}.custom-radio .custom-control-input:checked~.custom-control-label::before{background-color:#007bff}.custom-radio .custom-control-input:checked~.custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-radio .custom-control-input:disabled:checked~.custom-control-label::before{background-color:rgba(0,123,255,.5)}.custom-select{display:inline-block;width:100%;height:calc(2.25rem + 2px);padding:.375rem 1.75rem .375rem .75rem;line-height:1.5;color:#495057;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center;background-size:8px 10px;border:1px solid #ced4da;border-radius:.25rem;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-select:focus{border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem rgba(128,189,255,.5)}.custom-select:focus::-ms-value{color:#495057;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:.75rem;background-image:none}.custom-select:disabled{color:#6c757d;background-color:#e9ecef}.custom-select::-ms-expand{opacity:0}.custom-select-sm{height:calc(1.8125rem + 2px);padding-top:.375rem;padding-bottom:.375rem;font-size:75%}.custom-select-lg{height:calc(2.875rem + 2px);padding-top:.375rem;padding-bottom:.375rem;font-size:125%}.custom-file{position:relative;display:inline-block;width:100%;height:calc(2.25rem + 
2px);margin-bottom:0}.custom-file-input{position:relative;z-index:2;width:100%;height:calc(2.25rem + 2px);margin:0;opacity:0}.custom-file-input:focus~.custom-file-label{border-color:#80bdff;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.custom-file-input:focus~.custom-file-label::after{border-color:#80bdff}.custom-file-input:disabled~.custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en)~.custom-file-label::after{content:"Browse"}.custom-file-label{position:absolute;top:0;right:0;left:0;z-index:1;height:calc(2.25rem + 2px);padding:.375rem .75rem;line-height:1.5;color:#495057;background-color:#fff;border:1px solid #ced4da;border-radius:.25rem}.custom-file-label::after{position:absolute;top:0;right:0;bottom:0;z-index:3;display:block;height:2.25rem;padding:.375rem .75rem;line-height:1.5;color:#495057;content:"Browse";background-color:#e9ecef;border-left:1px solid #ced4da;border-radius:0 .25rem .25rem 0}.custom-range{width:100%;padding-left:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-range:focus{outline:0}.custom-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,.25)}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,.25)}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,.25)}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#007bff;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;appearance:none}@media screen and (prefers-reduced-motion:reduce){.custom-range::-webkit-slider-thumb{transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#b3d7ff}.custom-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#007bff;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-moz-appearance:none;appearance:none}@media screen and (prefers-reduced-motion:reduce){.custom-range::-moz-range-thumb{transition:none}}.custom-range::-moz-range-thumb:active{background-color:#b3d7ff}.custom-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;margin-top:0;margin-right:.2rem;margin-left:.2rem;background-color:#007bff;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;appearance:none}@media screen and (prefers-reduced-motion:reduce){.custom-range::-ms-thumb{transition:none}}.custom-range::-ms-thumb:active{background-color:#b3d7ff}.custom-range::-ms-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:transparent;border-color:transparent;border-width:.5rem}.custom-range::-ms-fill-lower{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{margin-right:15px;background-color:#dee2e6;border-radius:1rem}.custom-control-label::before,.custom-file-label,.custom-select{transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media screen and 
(prefers-reduced-motion:reduce){.custom-control-label::before,.custom-file-label,.custom-select{transition:none}}.nav{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 1rem}.nav-link:focus,.nav-link:hover{text-decoration:none}.nav-link.disabled{color:#6c757d}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:#e9ecef #e9ecef #dee2e6}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#007bff}.nav-fill .nav-item{-ms-flex:1 1 auto;flex:1 1 auto;text-align:center}.nav-justified .nav-item{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-ms-flex-align:center;align-items:center;-ms-flex-pack:justify;justify-content:space-between;padding:.5rem 1rem}.navbar>.container,.navbar>.container-fluid{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-ms-flex-align:center;align-items:center;-ms-flex-pack:justify;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;line-height:inherit;white-space:nowrap}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-nav{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{-ms-flex-preferred-size:100%;flex-basis:100%;-ms-flex-positive:1;flex-grow:1;-ms-flex-align:center;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem}.navbar-toggler:focus,.navbar-toggler:hover{text-decoration:none}.navbar-toggler:not(:disabled):not(.disabled){cursor:pointer}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat center center;background-size:100% 100%}@media (max-width:575.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{padding-right:0;padding-left:0}}@media (min-width:576px){.navbar-expand-sm{-ms-flex-flow:row nowrap;flex-flow:row nowrap;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-sm .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-sm 
.navbar-collapse{display:-ms-flexbox!important;display:flex!important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media (max-width:767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{padding-right:0;padding-left:0}}@media (min-width:768px){.navbar-expand-md{-ms-flex-flow:row nowrap;flex-flow:row nowrap;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-md .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-md .navbar-collapse{display:-ms-flexbox!important;display:flex!important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media (max-width:991.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{padding-right:0;padding-left:0}}@media (min-width:992px){.navbar-expand-lg{-ms-flex-flow:row nowrap;flex-flow:row nowrap;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-lg .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:-ms-flexbox!important;display:flex!important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media (max-width:1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{padding-right:0;padding-left:0}}@media (min-width:1200px){.navbar-expand-xl{-ms-flex-flow:row nowrap;flex-flow:row nowrap;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-xl .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:-ms-flexbox!important;display:flex!important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{-ms-flex-flow:row nowrap;flex-flow:row nowrap;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{-ms-flex-direction:row;flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:-ms-flexbox!important;display:flex!important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand{color:rgba(0,0,0,.9)}.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:rgba(0,0,0,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,.5)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(0,0,0,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,.3)}.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav 
.nav-link.active,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .show>.nav-link{color:rgba(0,0,0,.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,.5);border-color:rgba(0,0,0,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(0,0,0,.5)}.navbar-light .navbar-text a{color:rgba(0,0,0,.9)}.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:rgba(0,0,0,.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,.5)}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:rgba(255,255,255,.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,.25)}.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .show>.nav-link{color:#fff}.navbar-dark .navbar-toggler{color:rgba(255,255,255,.5);border-color:rgba(255,255,255,.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:rgba(255,255,255,.5)}.navbar-dark .navbar-text a{color:#fff}.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{position:relative;display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1.25rem}.card-title{margin-bottom:.75rem}.card-subtitle{margin-top:-.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{padding:.75rem 1.25rem;margin-bottom:0;background-color:rgba(0,0,0,.03);border-bottom:1px solid rgba(0,0,0,.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:.75rem 1.25rem;background-color:rgba(0,0,0,.03);border-top:1px solid rgba(0,0,0,.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-right:-.625rem;margin-bottom:-.75rem;margin-left:-.625rem;border-bottom:0}.card-header-pills{margin-right:-.625rem;margin-left:-.625rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img{width:100%;border-radius:calc(.25rem - 1px)}.card-img-top{width:100%;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img-bottom{width:100%;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 
1px)}.card-deck{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column}.card-deck .card{margin-bottom:15px}@media (min-width:576px){.card-deck{-ms-flex-flow:row wrap;flex-flow:row wrap;margin-right:-15px;margin-left:-15px}.card-deck .card{display:-ms-flexbox;display:flex;-ms-flex:1 0 0%;flex:1 0 0%;-ms-flex-direction:column;flex-direction:column;margin-right:15px;margin-bottom:0;margin-left:15px}}.card-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column}.card-group>.card{margin-bottom:15px}@media (min-width:576px){.card-group{-ms-flex-flow:row wrap;flex-flow:row wrap}.card-group>.card{-ms-flex:1 0 0%;flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:first-child{border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:first-child .card-header,.card-group>.card:first-child .card-img-top{border-top-right-radius:0}.card-group>.card:first-child .card-footer,.card-group>.card:first-child .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:last-child{border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:last-child .card-header,.card-group>.card:last-child .card-img-top{border-top-left-radius:0}.card-group>.card:last-child .card-footer,.card-group>.card:last-child .card-img-bottom{border-bottom-left-radius:0}.card-group>.card:only-child{border-radius:.25rem}.card-group>.card:only-child .card-header,.card-group>.card:only-child .card-img-top{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.card-group>.card:only-child .card-footer,.card-group>.card:only-child .card-img-bottom{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.card-group>.card:not(:first-child):not(:last-child):not(:only-child){border-radius:0}.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-footer,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-header,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-top{border-radius:0}}.card-columns .card{margin-bottom:.75rem}@media (min-width:576px){.card-columns{-webkit-column-count:3;-moz-column-count:3;column-count:3;-webkit-column-gap:1.25rem;-moz-column-gap:1.25rem;column-gap:1.25rem;orphans:1;widows:1}.card-columns .card{display:inline-block;width:100%}}.accordion .card:not(:first-of-type):not(:last-of-type){border-bottom:0;border-radius:0}.accordion .card:not(:first-of-type) .card-header:first-child{border-radius:0}.accordion .card:first-of-type{border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.accordion .card:last-of-type{border-top-left-radius:0;border-top-right-radius:0}.breadcrumb{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding:.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:.25rem}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item::before{display:inline-block;padding-right:.5rem;color:#6c757d;content:"/"}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:underline}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:none}.breadcrumb-item.active{color:#6c757d}.pagination{display:-ms-flexbox;display:flex;padding-left:0;list-style:none;border-radius:.25rem}.page-link{position:relative;display:block;padding:.5rem 
.75rem;margin-left:-1px;line-height:1.25;color:#007bff;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{z-index:2;color:#0056b3;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:2;outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.page-link:not(:disabled):not(.disabled){cursor:pointer}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.page-item.active .page-link{z-index:1;color:#fff;background-color:#007bff;border-color:#007bff}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.25em .4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{padding-right:.6em;padding-left:.6em;border-radius:10rem}.badge-primary{color:#fff;background-color:#007bff}.badge-primary[href]:focus,.badge-primary[href]:hover{color:#fff;text-decoration:none;background-color:#0062cc}.badge-secondary{color:#fff;background-color:#6c757d}.badge-secondary[href]:focus,.badge-secondary[href]:hover{color:#fff;text-decoration:none;background-color:#545b62}.badge-success{color:#fff;background-color:#28a745}.badge-success[href]:focus,.badge-success[href]:hover{color:#fff;text-decoration:none;background-color:#1e7e34}.badge-info{color:#fff;background-color:#17a2b8}.badge-info[href]:focus,.badge-info[href]:hover{color:#fff;text-decoration:none;background-color:#117a8b}.badge-warning{color:#212529;background-color:#ffc107}.badge-warning[href]:focus,.badge-warning[href]:hover{color:#212529;text-decoration:none;background-color:#d39e00}.badge-danger{color:#fff;background-color:#dc3545}.badge-danger[href]:focus,.badge-danger[href]:hover{color:#fff;text-decoration:none;background-color:#bd2130}.badge-light{color:#212529;background-color:#f8f9fa}.badge-light[href]:focus,.badge-light[href]:hover{color:#212529;text-decoration:none;background-color:#dae0e5}.badge-dark{color:#fff;background-color:#343a40}.badge-dark[href]:focus,.badge-dark[href]:hover{color:#fff;text-decoration:none;background-color:#1d2124}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:.3rem}@media (min-width:576px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:.75rem 1.25rem;margin-bottom:1rem;border:1px solid transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:.75rem 
1.25rem;color:inherit}.alert-primary{color:#004085;background-color:#cce5ff;border-color:#b8daff}.alert-primary hr{border-top-color:#9fcdff}.alert-primary .alert-link{color:#002752}.alert-secondary{color:#383d41;background-color:#e2e3e5;border-color:#d6d8db}.alert-secondary hr{border-top-color:#c8cbcf}.alert-secondary .alert-link{color:#202326}.alert-success{color:#155724;background-color:#d4edda;border-color:#c3e6cb}.alert-success hr{border-top-color:#b1dfbb}.alert-success .alert-link{color:#0b2e13}.alert-info{color:#0c5460;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#062c33}.alert-warning{color:#856404;background-color:#fff3cd;border-color:#ffeeba}.alert-warning hr{border-top-color:#ffe8a1}.alert-warning .alert-link{color:#533f03}.alert-danger{color:#721c24;background-color:#f8d7da;border-color:#f5c6cb}.alert-danger hr{border-top-color:#f1b0b7}.alert-danger .alert-link{color:#491217}.alert-light{color:#818182;background-color:#fefefe;border-color:#fdfdfe}.alert-light hr{border-top-color:#ececf6}.alert-light .alert-link{color:#686868}.alert-dark{color:#1b1e21;background-color:#d6d8d9;border-color:#c6c8ca}.alert-dark hr{border-top-color:#b9bbbe}.alert-dark .alert-link{color:#040505}@-webkit-keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}.progress{display:-ms-flexbox;display:flex;height:1rem;overflow:hidden;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress-bar{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;-ms-flex-pack:center;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#007bff;transition:width .6s ease}@media screen and (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear infinite}.media{display:-ms-flexbox;display:flex;-ms-flex-align:start;align-items:flex-start}.media-body{-ms-flex:1;flex:1}.list-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;margin-bottom:-1px;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.list-group-item:focus,.list-group-item:hover{z-index:1;text-decoration:none}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#007bff;border-color:#007bff}.list-group-flush .list-group-item{border-right:0;border-left:0;border-radius:0}.list-group-flush:first-child 
.list-group-item:first-child{border-top:0}.list-group-flush:last-child .list-group-item:last-child{border-bottom:0}.list-group-item-primary{color:#004085;background-color:#b8daff}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#004085;background-color:#9fcdff}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#004085;border-color:#004085}.list-group-item-secondary{color:#383d41;background-color:#d6d8db}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#383d41;background-color:#c8cbcf}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#383d41;border-color:#383d41}.list-group-item-success{color:#155724;background-color:#c3e6cb}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#155724;background-color:#b1dfbb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#155724;border-color:#155724}.list-group-item-info{color:#0c5460;background-color:#bee5eb}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#0c5460;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#0c5460;border-color:#0c5460}.list-group-item-warning{color:#856404;background-color:#ffeeba}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#856404;background-color:#ffe8a1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#856404;border-color:#856404}.list-group-item-danger{color:#721c24;background-color:#f5c6cb}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#721c24;background-color:#f1b0b7}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#721c24;border-color:#721c24}.list-group-item-light{color:#818182;background-color:#fdfdfe}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#818182;background-color:#ececf6}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#818182;border-color:#818182}.list-group-item-dark{color:#1b1e21;background-color:#c6c8ca}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#1b1e21;background-color:#b9bbbe}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#1b1e21;border-color:#1b1e21}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.5}.close:not(:disabled):not(.disabled){cursor:pointer}.close:not(:disabled):not(.disabled):focus,.close:not(:disabled):not(.disabled):hover{color:#000;text-decoration:none;opacity:.75}button.close{padding:0;background-color:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:-webkit-transform .3s ease-out;transition:transform .3s ease-out;transition:transform .3s ease-out,-webkit-transform .3s ease-out;-webkit-transform:translate(0,-25%);transform:translate(0,-25%)}@media 
screen and (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{-webkit-transform:translate(0,0);transform:translate(0,0)}.modal-dialog-centered{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;min-height:calc(100% - (.5rem * 2))}.modal-dialog-centered::before{display:block;height:calc(100vh - (.5rem * 2));content:""}.modal-content{position:relative;display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:-ms-flexbox;display:flex;-ms-flex-align:start;align-items:flex-start;-ms-flex-pack:justify;justify-content:space-between;padding:1rem;border-bottom:1px solid #e9ecef;border-top-left-radius:.3rem;border-top-right-radius:.3rem}.modal-header .close{padding:1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem}.modal-footer{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;-ms-flex-pack:end;justify-content:flex-end;padding:1rem;border-top:1px solid #e9ecef}.modal-footer>:not(:first-child){margin-left:.25rem}.modal-footer>:not(:last-child){margin-right:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-centered{min-height:calc(100% - (1.75rem * 2))}.modal-dialog-centered::before{height:calc(100vh - (1.75rem * 2))}.modal-sm{max-width:300px}}@media (min-width:992px){.modal-lg{max-width:800px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[x-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[x-placement^=top] .arrow,.bs-tooltip-top .arrow{bottom:0}.bs-tooltip-auto[x-placement^=top] .arrow::before,.bs-tooltip-top .arrow::before{top:0;border-width:.4rem .4rem 0;border-top-color:#000}.bs-tooltip-auto[x-placement^=right],.bs-tooltip-right{padding:0 .4rem}.bs-tooltip-auto[x-placement^=right] .arrow,.bs-tooltip-right .arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=right] .arrow::before,.bs-tooltip-right .arrow::before{right:0;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-auto[x-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[x-placement^=bottom] .arrow,.bs-tooltip-bottom .arrow{top:0}.bs-tooltip-auto[x-placement^=bottom] .arrow::before,.bs-tooltip-bottom .arrow::before{bottom:0;border-width:0 .4rem 
.4rem;border-bottom-color:#000}.bs-tooltip-auto[x-placement^=left],.bs-tooltip-left{padding:0 .4rem}.bs-tooltip-auto[x-placement^=left] .arrow,.bs-tooltip-left .arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=left] .arrow::before,.bs-tooltip-left .arrow::before{left:0;border-width:.4rem 0 .4rem .4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{position:absolute;top:0;left:0;z-index:1060;display:block;max-width:276px;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem}.popover .arrow{position:absolute;display:block;width:1rem;height:.5rem;margin:0 .3rem}.popover .arrow::after,.popover .arrow::before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-auto[x-placement^=top],.bs-popover-top{margin-bottom:.5rem}.bs-popover-auto[x-placement^=top] .arrow,.bs-popover-top .arrow{bottom:calc((.5rem + 1px) * -1)}.bs-popover-auto[x-placement^=top] .arrow::after,.bs-popover-auto[x-placement^=top] .arrow::before,.bs-popover-top .arrow::after,.bs-popover-top .arrow::before{border-width:.5rem .5rem 0}.bs-popover-auto[x-placement^=top] .arrow::before,.bs-popover-top .arrow::before{bottom:0;border-top-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=top] .arrow::after,.bs-popover-top .arrow::after{bottom:1px;border-top-color:#fff}.bs-popover-auto[x-placement^=right],.bs-popover-right{margin-left:.5rem}.bs-popover-auto[x-placement^=right] .arrow,.bs-popover-right .arrow{left:calc((.5rem + 1px) * -1);width:.5rem;height:1rem;margin:.3rem 0}.bs-popover-auto[x-placement^=right] .arrow::after,.bs-popover-auto[x-placement^=right] .arrow::before,.bs-popover-right .arrow::after,.bs-popover-right .arrow::before{border-width:.5rem .5rem .5rem 0}.bs-popover-auto[x-placement^=right] .arrow::before,.bs-popover-right .arrow::before{left:0;border-right-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=right] .arrow::after,.bs-popover-right .arrow::after{left:1px;border-right-color:#fff}.bs-popover-auto[x-placement^=bottom],.bs-popover-bottom{margin-top:.5rem}.bs-popover-auto[x-placement^=bottom] .arrow,.bs-popover-bottom .arrow{top:calc((.5rem + 1px) * -1)}.bs-popover-auto[x-placement^=bottom] .arrow::after,.bs-popover-auto[x-placement^=bottom] .arrow::before,.bs-popover-bottom .arrow::after,.bs-popover-bottom .arrow::before{border-width:0 .5rem .5rem .5rem}.bs-popover-auto[x-placement^=bottom] .arrow::before,.bs-popover-bottom .arrow::before{top:0;border-bottom-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=bottom] .arrow::after,.bs-popover-bottom .arrow::after{top:1px;border-bottom-color:#fff}.bs-popover-auto[x-placement^=bottom] .popover-header::before,.bs-popover-bottom .popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-auto[x-placement^=left],.bs-popover-left{margin-right:.5rem}.bs-popover-auto[x-placement^=left] .arrow,.bs-popover-left 
.arrow{right:calc((.5rem + 1px) * -1);width:.5rem;height:1rem;margin:.3rem 0}.bs-popover-auto[x-placement^=left] .arrow::after,.bs-popover-auto[x-placement^=left] .arrow::before,.bs-popover-left .arrow::after,.bs-popover-left .arrow::before{border-width:.5rem 0 .5rem .5rem}.bs-popover-auto[x-placement^=left] .arrow::before,.bs-popover-left .arrow::before{right:0;border-left-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=left] .arrow::after,.bs-popover-left .arrow::after{right:1px;border-left-color:#fff}.popover-header{padding:.5rem .75rem;margin-bottom:0;font-size:1rem;color:inherit;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:.5rem .75rem;color:#212529}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-item{position:relative;display:none;-ms-flex-align:center;align-items:center;width:100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block;transition:-webkit-transform .6s ease;transition:transform .6s ease;transition:transform .6s ease,-webkit-transform .6s ease}@media screen and (prefers-reduced-motion:reduce){.carousel-item-next,.carousel-item-prev,.carousel-item.active{transition:none}}.carousel-item-next,.carousel-item-prev{position:absolute;top:0}.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translateX(0);transform:translateX(0)}@supports ((-webkit-transform-style:preserve-3d) or (transform-style:preserve-3d)){.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.active.carousel-item-right,.carousel-item-next{-webkit-transform:translateX(100%);transform:translateX(100%)}@supports ((-webkit-transform-style:preserve-3d) or (transform-style:preserve-3d)){.active.carousel-item-right,.carousel-item-next{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}.active.carousel-item-left,.carousel-item-prev{-webkit-transform:translateX(-100%);transform:translateX(-100%)}@supports ((-webkit-transform-style:preserve-3d) or (transform-style:preserve-3d)){.active.carousel-item-left,.carousel-item-prev{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}.carousel-fade .carousel-item{opacity:0;transition-duration:.6s;transition-property:opacity}.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right,.carousel-fade .carousel-item.active{opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{opacity:0}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-prev,.carousel-fade .carousel-item-next,.carousel-fade .carousel-item-prev,.carousel-fade .carousel-item.active{-webkit-transform:translateX(0);transform:translateX(0)}@supports ((-webkit-transform-style:preserve-3d) or (transform-style:preserve-3d)){.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-prev,.carousel-fade .carousel-item-next,.carousel-fade .carousel-item-prev,.carousel-fade 
.carousel-item.active{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;-ms-flex-pack:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:.5}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:20px;height:20px;background:transparent no-repeat center center;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:10px;left:0;z-index:15;display:-ms-flexbox;display:flex;-ms-flex-pack:center;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{position:relative;-ms-flex:0 1 auto;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:rgba(255,255,255,.5)}.carousel-indicators li::before{position:absolute;top:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators li::after{position:absolute;bottom:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators 
.active{background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.bg-primary{background-color:#007bff!important}a.bg-primary:focus,a.bg-primary:hover,button.bg-primary:focus,button.bg-primary:hover{background-color:#0062cc!important}.bg-secondary{background-color:#6c757d!important}a.bg-secondary:focus,a.bg-secondary:hover,button.bg-secondary:focus,button.bg-secondary:hover{background-color:#545b62!important}.bg-success{background-color:#28a745!important}a.bg-success:focus,a.bg-success:hover,button.bg-success:focus,button.bg-success:hover{background-color:#1e7e34!important}.bg-info{background-color:#17a2b8!important}a.bg-info:focus,a.bg-info:hover,button.bg-info:focus,button.bg-info:hover{background-color:#117a8b!important}.bg-warning{background-color:#ffc107!important}a.bg-warning:focus,a.bg-warning:hover,button.bg-warning:focus,button.bg-warning:hover{background-color:#d39e00!important}.bg-danger{background-color:#dc3545!important}a.bg-danger:focus,a.bg-danger:hover,button.bg-danger:focus,button.bg-danger:hover{background-color:#bd2130!important}.bg-light{background-color:#f8f9fa!important}a.bg-light:focus,a.bg-light:hover,button.bg-light:focus,button.bg-light:hover{background-color:#dae0e5!important}.bg-dark{background-color:#343a40!important}a.bg-dark:focus,a.bg-dark:hover,button.bg-dark:focus,button.bg-dark:hover{background-color:#1d2124!important}.bg-white{background-color:#fff!important}.bg-transparent{background-color:transparent!important}.border{border:1px solid #dee2e6!important}.border-top{border-top:1px solid #dee2e6!important}.border-right{border-right:1px solid #dee2e6!important}.border-bottom{border-bottom:1px solid #dee2e6!important}.border-left{border-left:1px solid 
#dee2e6!important}.border-0{border:0!important}.border-top-0{border-top:0!important}.border-right-0{border-right:0!important}.border-bottom-0{border-bottom:0!important}.border-left-0{border-left:0!important}.border-primary{border-color:#007bff!important}.border-secondary{border-color:#6c757d!important}.border-success{border-color:#28a745!important}.border-info{border-color:#17a2b8!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f8f9fa!important}.border-dark{border-color:#343a40!important}.border-white{border-color:#fff!important}.rounded{border-radius:.25rem!important}.rounded-top{border-top-left-radius:.25rem!important;border-top-right-radius:.25rem!important}.rounded-right{border-top-right-radius:.25rem!important;border-bottom-right-radius:.25rem!important}.rounded-bottom{border-bottom-right-radius:.25rem!important;border-bottom-left-radius:.25rem!important}.rounded-left{border-top-left-radius:.25rem!important;border-bottom-left-radius:.25rem!important}.rounded-circle{border-radius:50%!important}.rounded-0{border-radius:0!important}.clearfix::after{display:block;clear:both;content:""}.d-none{display:none!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:-ms-flexbox!important;display:flex!important}.d-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}@media (min-width:576px){.d-sm-none{display:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:-ms-flexbox!important;display:flex!important}.d-sm-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media (min-width:768px){.d-md-none{display:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:-ms-flexbox!important;display:flex!important}.d-md-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media (min-width:992px){.d-lg-none{display:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:-ms-flexbox!important;display:flex!important}.d-lg-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media (min-width:1200px){.d-xl-none{display:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:-ms-flexbox!important;display:flex!important}.d-xl-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media 
print{.d-print-none{display:none!important}.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:-ms-flexbox!important;display:flex!important}.d-print-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive::before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9::before{padding-top:42.857143%}.embed-responsive-16by9::before{padding-top:56.25%}.embed-responsive-4by3::before{padding-top:75%}.embed-responsive-1by1::before{padding-top:100%}.flex-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-center{-ms-flex-align:center!important;align-items:center!important}.align-items-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-end{-ms-flex-item-align:end!i
mportant;align-self:flex-end!important}.align-self-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}@media (min-width:576px){.flex-sm-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-sm-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-sm-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-sm-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-sm-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-sm-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-sm-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-sm-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-sm-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-sm-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-sm-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-sm-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-sm-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-sm-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-sm-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-sm-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-sm-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-sm-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-sm-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-sm-center{-ms-flex-align:center!important;align-items:center!important}.align-items-sm-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-sm-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-sm-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-sm-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-sm-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-sm-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-sm-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-sm-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-sm-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-sm-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-sm-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-sm-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-sm-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-sm-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}}@media 
(min-width:768px){.flex-md-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-md-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-md-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-md-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-md-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-md-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-md-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-md-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-md-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-md-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-md-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-md-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-md-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-md-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-md-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-md-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-md-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-md-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-md-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-md-center{-ms-flex-align:center!important;align-items:center!important}.align-items-md-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-md-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-md-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-md-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-md-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-md-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-md-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-md-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-md-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-md-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-md-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-md-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-md-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-md-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}}@media 
(min-width:992px){.flex-lg-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-lg-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-lg-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-lg-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-lg-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-lg-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-lg-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-lg-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-lg-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-lg-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-lg-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-lg-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-lg-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-lg-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-lg-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-lg-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-lg-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-lg-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-lg-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-lg-center{-ms-flex-align:center!important;align-items:center!important}.align-items-lg-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-lg-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-lg-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-lg-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-lg-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-lg-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-lg-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-lg-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-lg-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-lg-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-lg-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-lg-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-lg-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-lg-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}}@media 
(min-width:1200px){.flex-xl-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-xl-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-xl-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-xl-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-xl-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-xl-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-xl-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-xl-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-xl-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-xl-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-xl-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-xl-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-xl-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-xl-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-xl-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-xl-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-xl-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-xl-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-xl-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-xl-center{-ms-flex-align:center!important;align-items:center!important}.align-items-xl-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-xl-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-xl-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-xl-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-xl-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-xl-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-xl-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-xl-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-xl-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-xl-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-xl-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-xl-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-xl-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-xl-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}}.float-left{float:left!important}.float-right{float:right!important}.float-none{float:none!important}@media (min-width:576px){.float-sm-left{float:left!important}.float-sm-right{float:right!important}.float-sm-none{float:none!important}}@media (min-width:768px){.float-md-left{float:left!important}.float-md-right{float:right!important}.float-md-none{float:none!important}}@media (min-width:992px){.float-lg-left{float:left!important}.float-lg-right{float:right!important}.float-lg-none{float:none!important}}@media 
(min-width:1200px){.float-xl-left{float:left!important}.float-xl-right{float:right!important}.float-xl-none{float:none!important}}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:-webkit-sticky!important;position:sticky!important}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}@supports ((position:-webkit-sticky) or (position:sticky)){.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal}.shadow-sm{box-shadow:0 .125rem .25rem rgba(0,0,0,.075)!important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15)!important}.shadow-lg{box-shadow:0 1rem 3rem rgba(0,0,0,.175)!important}.shadow-none{box-shadow:none!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mw-100{max-width:100%!important}.mh-100{max-height:100%!important}.m-0{margin:0!important}.mt-0,.my-0{margin-top:0!important}.mr-0,.mx-0{margin-right:0!important}.mb-0,.my-0{margin-bottom:0!important}.ml-0,.mx-0{margin-left:0!important}.m-1{margin:.25rem!important}.mt-1,.my-1{margin-top:.25rem!important}.mr-1,.mx-1{margin-right:.25rem!important}.mb-1,.my-1{margin-bottom:.25rem!important}.ml-1,.mx-1{margin-left:.25rem!important}.m-2{margin:.5rem!important}.mt-2,.my-2{margin-top:.5rem!important}.mr-2,.mx-2{margin-right:.5rem!important}.mb-2,.my-2{margin-bottom:.5rem!important}.ml-2,.mx-2{margin-left:.5rem!important}.m-3{margin:1rem!important}.mt-3,.my-3{margin-top:1rem!important}.mr-3,.mx-3{margin-right:1rem!important}.mb-3,.my-3{margin-bottom:1rem!important}.ml-3,.mx-3{margin-left:1rem!important}.m-4{margin:1.5rem!important}.mt-4,.my-4{margin-top:1.5rem!important}.mr-4,.mx-4{margin-right:1.5rem!important}.mb-4,.my-4{margin-bottom:1.5rem!important}.ml-4,.mx-4{margin-left:1.5rem!important}.m-5{margin:3rem!important}.mt-5,.my-5{margin-top:3rem!important}.mr-5,.mx-5{margin-right:3rem!important}.mb-5,.my-5{margin-bottom:3rem!important}.ml-5,.mx-5{margin-left:3rem!important}.p-0{padding:0!important}.pt-0,.py-0{padding-top:0!important}.pr-0,.px-0{padding-right:0!important}.pb-0,.py-0{padding-bottom:0!important}.pl-0,.px-0{padding-left:0!important}.p-1{padding:.25rem!important}.pt-1,.py-1{padding-top:.25rem!important}.pr-1,.px-1{padding-right:.25rem!important}.pb-1,.py-1{padding-bottom:.25rem!important}.pl-1,.px-1{padding-left:.25rem!important}.p-2{padding:.5rem!important}.pt-2,.py-2{padding-top:.5rem!important}.pr-2,.px-2{padding-right:.5rem!important}.pb-2,.py-2{padding-bottom:.5rem!important}.pl-2,.px-2{padding-left:.5rem!important}.p-3{padding:1rem!important}.pt-3,.py-3{padding-top:1rem!important}.pr-3,.px-3{padding-right:1rem!important}.pb-3,.py-3{padding-bottom:1rem!important}.pl-3,.px-3{padding-left:1rem!important}.p-4{padding:1.5rem!important}.pt-4,.py-4{padding-top:1.5rem!important}.pr-4,.px-4{padding-right:1.5rem!important}.pb-4,.py-4{padding-bottom:1.5rem!important}.pl-4,.px-4{padding-left:1.5rem!important}.p-5{paddi
ng:3rem!important}.pt-5,.py-5{padding-top:3rem!important}.pr-5,.px-5{padding-right:3rem!important}.pb-5,.py-5{padding-bottom:3rem!important}.pl-5,.px-5{padding-left:3rem!important}.m-auto{margin:auto!important}.mt-auto,.my-auto{margin-top:auto!important}.mr-auto,.mx-auto{margin-right:auto!important}.mb-auto,.my-auto{margin-bottom:auto!important}.ml-auto,.mx-auto{margin-left:auto!important}@media (min-width:576px){.m-sm-0{margin:0!important}.mt-sm-0,.my-sm-0{margin-top:0!important}.mr-sm-0,.mx-sm-0{margin-right:0!important}.mb-sm-0,.my-sm-0{margin-bottom:0!important}.ml-sm-0,.mx-sm-0{margin-left:0!important}.m-sm-1{margin:.25rem!important}.mt-sm-1,.my-sm-1{margin-top:.25rem!important}.mr-sm-1,.mx-sm-1{margin-right:.25rem!important}.mb-sm-1,.my-sm-1{margin-bottom:.25rem!important}.ml-sm-1,.mx-sm-1{margin-left:.25rem!important}.m-sm-2{margin:.5rem!important}.mt-sm-2,.my-sm-2{margin-top:.5rem!important}.mr-sm-2,.mx-sm-2{margin-right:.5rem!important}.mb-sm-2,.my-sm-2{margin-bottom:.5rem!important}.ml-sm-2,.mx-sm-2{margin-left:.5rem!important}.m-sm-3{margin:1rem!important}.mt-sm-3,.my-sm-3{margin-top:1rem!important}.mr-sm-3,.mx-sm-3{margin-right:1rem!important}.mb-sm-3,.my-sm-3{margin-bottom:1rem!important}.ml-sm-3,.mx-sm-3{margin-left:1rem!important}.m-sm-4{margin:1.5rem!important}.mt-sm-4,.my-sm-4{margin-top:1.5rem!important}.mr-sm-4,.mx-sm-4{margin-right:1.5rem!important}.mb-sm-4,.my-sm-4{margin-bottom:1.5rem!important}.ml-sm-4,.mx-sm-4{margin-left:1.5rem!important}.m-sm-5{margin:3rem!important}.mt-sm-5,.my-sm-5{margin-top:3rem!important}.mr-sm-5,.mx-sm-5{margin-right:3rem!important}.mb-sm-5,.my-sm-5{margin-bottom:3rem!important}.ml-sm-5,.mx-sm-5{margin-left:3rem!important}.p-sm-0{padding:0!important}.pt-sm-0,.py-sm-0{padding-top:0!important}.pr-sm-0,.px-sm-0{padding-right:0!important}.pb-sm-0,.py-sm-0{padding-bottom:0!important}.pl-sm-0,.px-sm-0{padding-left:0!important}.p-sm-1{padding:.25rem!important}.pt-sm-1,.py-sm-1{padding-top:.25rem!important}.pr-sm-1,.px-sm-1{padding-right:.25rem!important}.pb-sm-1,.py-sm-1{padding-bottom:.25rem!important}.pl-sm-1,.px-sm-1{padding-left:.25rem!important}.p-sm-2{padding:.5rem!important}.pt-sm-2,.py-sm-2{padding-top:.5rem!important}.pr-sm-2,.px-sm-2{padding-right:.5rem!important}.pb-sm-2,.py-sm-2{padding-bottom:.5rem!important}.pl-sm-2,.px-sm-2{padding-left:.5rem!important}.p-sm-3{padding:1rem!important}.pt-sm-3,.py-sm-3{padding-top:1rem!important}.pr-sm-3,.px-sm-3{padding-right:1rem!important}.pb-sm-3,.py-sm-3{padding-bottom:1rem!important}.pl-sm-3,.px-sm-3{padding-left:1rem!important}.p-sm-4{padding:1.5rem!important}.pt-sm-4,.py-sm-4{padding-top:1.5rem!important}.pr-sm-4,.px-sm-4{padding-right:1.5rem!important}.pb-sm-4,.py-sm-4{padding-bottom:1.5rem!important}.pl-sm-4,.px-sm-4{padding-left:1.5rem!important}.p-sm-5{padding:3rem!important}.pt-sm-5,.py-sm-5{padding-top:3rem!important}.pr-sm-5,.px-sm-5{padding-right:3rem!important}.pb-sm-5,.py-sm-5{padding-bottom:3rem!important}.pl-sm-5,.px-sm-5{padding-left:3rem!important}.m-sm-auto{margin:auto!important}.mt-sm-auto,.my-sm-auto{margin-top:auto!important}.mr-sm-auto,.mx-sm-auto{margin-right:auto!important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto!important}.ml-sm-auto,.mx-sm-auto{margin-left:auto!important}}@media 
(min-width:768px){.m-md-0{margin:0!important}.mt-md-0,.my-md-0{margin-top:0!important}.mr-md-0,.mx-md-0{margin-right:0!important}.mb-md-0,.my-md-0{margin-bottom:0!important}.ml-md-0,.mx-md-0{margin-left:0!important}.m-md-1{margin:.25rem!important}.mt-md-1,.my-md-1{margin-top:.25rem!important}.mr-md-1,.mx-md-1{margin-right:.25rem!important}.mb-md-1,.my-md-1{margin-bottom:.25rem!important}.ml-md-1,.mx-md-1{margin-left:.25rem!important}.m-md-2{margin:.5rem!important}.mt-md-2,.my-md-2{margin-top:.5rem!important}.mr-md-2,.mx-md-2{margin-right:.5rem!important}.mb-md-2,.my-md-2{margin-bottom:.5rem!important}.ml-md-2,.mx-md-2{margin-left:.5rem!important}.m-md-3{margin:1rem!important}.mt-md-3,.my-md-3{margin-top:1rem!important}.mr-md-3,.mx-md-3{margin-right:1rem!important}.mb-md-3,.my-md-3{margin-bottom:1rem!important}.ml-md-3,.mx-md-3{margin-left:1rem!important}.m-md-4{margin:1.5rem!important}.mt-md-4,.my-md-4{margin-top:1.5rem!important}.mr-md-4,.mx-md-4{margin-right:1.5rem!important}.mb-md-4,.my-md-4{margin-bottom:1.5rem!important}.ml-md-4,.mx-md-4{margin-left:1.5rem!important}.m-md-5{margin:3rem!important}.mt-md-5,.my-md-5{margin-top:3rem!important}.mr-md-5,.mx-md-5{margin-right:3rem!important}.mb-md-5,.my-md-5{margin-bottom:3rem!important}.ml-md-5,.mx-md-5{margin-left:3rem!important}.p-md-0{padding:0!important}.pt-md-0,.py-md-0{padding-top:0!important}.pr-md-0,.px-md-0{padding-right:0!important}.pb-md-0,.py-md-0{padding-bottom:0!important}.pl-md-0,.px-md-0{padding-left:0!important}.p-md-1{padding:.25rem!important}.pt-md-1,.py-md-1{padding-top:.25rem!important}.pr-md-1,.px-md-1{padding-right:.25rem!important}.pb-md-1,.py-md-1{padding-bottom:.25rem!important}.pl-md-1,.px-md-1{padding-left:.25rem!important}.p-md-2{padding:.5rem!important}.pt-md-2,.py-md-2{padding-top:.5rem!important}.pr-md-2,.px-md-2{padding-right:.5rem!important}.pb-md-2,.py-md-2{padding-bottom:.5rem!important}.pl-md-2,.px-md-2{padding-left:.5rem!important}.p-md-3{padding:1rem!important}.pt-md-3,.py-md-3{padding-top:1rem!important}.pr-md-3,.px-md-3{padding-right:1rem!important}.pb-md-3,.py-md-3{padding-bottom:1rem!important}.pl-md-3,.px-md-3{padding-left:1rem!important}.p-md-4{padding:1.5rem!important}.pt-md-4,.py-md-4{padding-top:1.5rem!important}.pr-md-4,.px-md-4{padding-right:1.5rem!important}.pb-md-4,.py-md-4{padding-bottom:1.5rem!important}.pl-md-4,.px-md-4{padding-left:1.5rem!important}.p-md-5{padding:3rem!important}.pt-md-5,.py-md-5{padding-top:3rem!important}.pr-md-5,.px-md-5{padding-right:3rem!important}.pb-md-5,.py-md-5{padding-bottom:3rem!important}.pl-md-5,.px-md-5{padding-left:3rem!important}.m-md-auto{margin:auto!important}.mt-md-auto,.my-md-auto{margin-top:auto!important}.mr-md-auto,.mx-md-auto{margin-right:auto!important}.mb-md-auto,.my-md-auto{margin-bottom:auto!important}.ml-md-auto,.mx-md-auto{margin-left:auto!important}}@media 
(min-width:992px){.m-lg-0{margin:0!important}.mt-lg-0,.my-lg-0{margin-top:0!important}.mr-lg-0,.mx-lg-0{margin-right:0!important}.mb-lg-0,.my-lg-0{margin-bottom:0!important}.ml-lg-0,.mx-lg-0{margin-left:0!important}.m-lg-1{margin:.25rem!important}.mt-lg-1,.my-lg-1{margin-top:.25rem!important}.mr-lg-1,.mx-lg-1{margin-right:.25rem!important}.mb-lg-1,.my-lg-1{margin-bottom:.25rem!important}.ml-lg-1,.mx-lg-1{margin-left:.25rem!important}.m-lg-2{margin:.5rem!important}.mt-lg-2,.my-lg-2{margin-top:.5rem!important}.mr-lg-2,.mx-lg-2{margin-right:.5rem!important}.mb-lg-2,.my-lg-2{margin-bottom:.5rem!important}.ml-lg-2,.mx-lg-2{margin-left:.5rem!important}.m-lg-3{margin:1rem!important}.mt-lg-3,.my-lg-3{margin-top:1rem!important}.mr-lg-3,.mx-lg-3{margin-right:1rem!important}.mb-lg-3,.my-lg-3{margin-bottom:1rem!important}.ml-lg-3,.mx-lg-3{margin-left:1rem!important}.m-lg-4{margin:1.5rem!important}.mt-lg-4,.my-lg-4{margin-top:1.5rem!important}.mr-lg-4,.mx-lg-4{margin-right:1.5rem!important}.mb-lg-4,.my-lg-4{margin-bottom:1.5rem!important}.ml-lg-4,.mx-lg-4{margin-left:1.5rem!important}.m-lg-5{margin:3rem!important}.mt-lg-5,.my-lg-5{margin-top:3rem!important}.mr-lg-5,.mx-lg-5{margin-right:3rem!important}.mb-lg-5,.my-lg-5{margin-bottom:3rem!important}.ml-lg-5,.mx-lg-5{margin-left:3rem!important}.p-lg-0{padding:0!important}.pt-lg-0,.py-lg-0{padding-top:0!important}.pr-lg-0,.px-lg-0{padding-right:0!important}.pb-lg-0,.py-lg-0{padding-bottom:0!important}.pl-lg-0,.px-lg-0{padding-left:0!important}.p-lg-1{padding:.25rem!important}.pt-lg-1,.py-lg-1{padding-top:.25rem!important}.pr-lg-1,.px-lg-1{padding-right:.25rem!important}.pb-lg-1,.py-lg-1{padding-bottom:.25rem!important}.pl-lg-1,.px-lg-1{padding-left:.25rem!important}.p-lg-2{padding:.5rem!important}.pt-lg-2,.py-lg-2{padding-top:.5rem!important}.pr-lg-2,.px-lg-2{padding-right:.5rem!important}.pb-lg-2,.py-lg-2{padding-bottom:.5rem!important}.pl-lg-2,.px-lg-2{padding-left:.5rem!important}.p-lg-3{padding:1rem!important}.pt-lg-3,.py-lg-3{padding-top:1rem!important}.pr-lg-3,.px-lg-3{padding-right:1rem!important}.pb-lg-3,.py-lg-3{padding-bottom:1rem!important}.pl-lg-3,.px-lg-3{padding-left:1rem!important}.p-lg-4{padding:1.5rem!important}.pt-lg-4,.py-lg-4{padding-top:1.5rem!important}.pr-lg-4,.px-lg-4{padding-right:1.5rem!important}.pb-lg-4,.py-lg-4{padding-bottom:1.5rem!important}.pl-lg-4,.px-lg-4{padding-left:1.5rem!important}.p-lg-5{padding:3rem!important}.pt-lg-5,.py-lg-5{padding-top:3rem!important}.pr-lg-5,.px-lg-5{padding-right:3rem!important}.pb-lg-5,.py-lg-5{padding-bottom:3rem!important}.pl-lg-5,.px-lg-5{padding-left:3rem!important}.m-lg-auto{margin:auto!important}.mt-lg-auto,.my-lg-auto{margin-top:auto!important}.mr-lg-auto,.mx-lg-auto{margin-right:auto!important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto!important}.ml-lg-auto,.mx-lg-auto{margin-left:auto!important}}@media 
(min-width:1200px){.m-xl-0{margin:0!important}.mt-xl-0,.my-xl-0{margin-top:0!important}.mr-xl-0,.mx-xl-0{margin-right:0!important}.mb-xl-0,.my-xl-0{margin-bottom:0!important}.ml-xl-0,.mx-xl-0{margin-left:0!important}.m-xl-1{margin:.25rem!important}.mt-xl-1,.my-xl-1{margin-top:.25rem!important}.mr-xl-1,.mx-xl-1{margin-right:.25rem!important}.mb-xl-1,.my-xl-1{margin-bottom:.25rem!important}.ml-xl-1,.mx-xl-1{margin-left:.25rem!important}.m-xl-2{margin:.5rem!important}.mt-xl-2,.my-xl-2{margin-top:.5rem!important}.mr-xl-2,.mx-xl-2{margin-right:.5rem!important}.mb-xl-2,.my-xl-2{margin-bottom:.5rem!important}.ml-xl-2,.mx-xl-2{margin-left:.5rem!important}.m-xl-3{margin:1rem!important}.mt-xl-3,.my-xl-3{margin-top:1rem!important}.mr-xl-3,.mx-xl-3{margin-right:1rem!important}.mb-xl-3,.my-xl-3{margin-bottom:1rem!important}.ml-xl-3,.mx-xl-3{margin-left:1rem!important}.m-xl-4{margin:1.5rem!important}.mt-xl-4,.my-xl-4{margin-top:1.5rem!important}.mr-xl-4,.mx-xl-4{margin-right:1.5rem!important}.mb-xl-4,.my-xl-4{margin-bottom:1.5rem!important}.ml-xl-4,.mx-xl-4{margin-left:1.5rem!important}.m-xl-5{margin:3rem!important}.mt-xl-5,.my-xl-5{margin-top:3rem!important}.mr-xl-5,.mx-xl-5{margin-right:3rem!important}.mb-xl-5,.my-xl-5{margin-bottom:3rem!important}.ml-xl-5,.mx-xl-5{margin-left:3rem!important}.p-xl-0{padding:0!important}.pt-xl-0,.py-xl-0{padding-top:0!important}.pr-xl-0,.px-xl-0{padding-right:0!important}.pb-xl-0,.py-xl-0{padding-bottom:0!important}.pl-xl-0,.px-xl-0{padding-left:0!important}.p-xl-1{padding:.25rem!important}.pt-xl-1,.py-xl-1{padding-top:.25rem!important}.pr-xl-1,.px-xl-1{padding-right:.25rem!important}.pb-xl-1,.py-xl-1{padding-bottom:.25rem!important}.pl-xl-1,.px-xl-1{padding-left:.25rem!important}.p-xl-2{padding:.5rem!important}.pt-xl-2,.py-xl-2{padding-top:.5rem!important}.pr-xl-2,.px-xl-2{padding-right:.5rem!important}.pb-xl-2,.py-xl-2{padding-bottom:.5rem!important}.pl-xl-2,.px-xl-2{padding-left:.5rem!important}.p-xl-3{padding:1rem!important}.pt-xl-3,.py-xl-3{padding-top:1rem!important}.pr-xl-3,.px-xl-3{padding-right:1rem!important}.pb-xl-3,.py-xl-3{padding-bottom:1rem!important}.pl-xl-3,.px-xl-3{padding-left:1rem!important}.p-xl-4{padding:1.5rem!important}.pt-xl-4,.py-xl-4{padding-top:1.5rem!important}.pr-xl-4,.px-xl-4{padding-right:1.5rem!important}.pb-xl-4,.py-xl-4{padding-bottom:1.5rem!important}.pl-xl-4,.px-xl-4{padding-left:1.5rem!important}.p-xl-5{padding:3rem!important}.pt-xl-5,.py-xl-5{padding-top:3rem!important}.pr-xl-5,.px-xl-5{padding-right:3rem!important}.pb-xl-5,.py-xl-5{padding-bottom:3rem!important}.pl-xl-5,.px-xl-5{padding-left:3rem!important}.m-xl-auto{margin:auto!important}.mt-xl-auto,.my-xl-auto{margin-top:auto!important}.mr-xl-auto,.mx-xl-auto{margin-right:auto!important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto!important}.ml-xl-auto,.mx-xl-auto{margin-left:auto!important}}.text-monospace{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}.text-justify{text-align:justify!important}.text-nowrap{white-space:nowrap!important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left!important}.text-right{text-align:right!important}.text-center{text-align:center!important}@media (min-width:576px){.text-sm-left{text-align:left!important}.text-sm-right{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:768px){.text-md-left{text-align:left!important}.text-md-right{text-align:right!important}.text-md-center{text-align:center!important}}@media 
(min-width:992px){.text-lg-left{text-align:left!important}.text-lg-right{text-align:right!important}.text-lg-center{text-align:center!important}}@media (min-width:1200px){.text-xl-left{text-align:left!important}.text-xl-right{text-align:right!important}.text-xl-center{text-align:center!important}}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.font-weight-light{font-weight:300!important}.font-weight-normal{font-weight:400!important}.font-weight-bold{font-weight:700!important}.font-italic{font-style:italic!important}.text-white{color:#fff!important}.text-primary{color:#007bff!important}a.text-primary:focus,a.text-primary:hover{color:#0062cc!important}.text-secondary{color:#6c757d!important}a.text-secondary:focus,a.text-secondary:hover{color:#545b62!important}.text-success{color:#28a745!important}a.text-success:focus,a.text-success:hover{color:#1e7e34!important}.text-info{color:#17a2b8!important}a.text-info:focus,a.text-info:hover{color:#117a8b!important}.text-warning{color:#ffc107!important}a.text-warning:focus,a.text-warning:hover{color:#d39e00!important}.text-danger{color:#dc3545!important}a.text-danger:focus,a.text-danger:hover{color:#bd2130!important}.text-light{color:#f8f9fa!important}a.text-light:focus,a.text-light:hover{color:#dae0e5!important}.text-dark{color:#343a40!important}a.text-dark:focus,a.text-dark:hover{color:#1d2124!important}.text-body{color:#212529!important}.text-muted{color:#6c757d!important}.text-black-50{color:rgba(0,0,0,.5)!important}.text-white-50{color:rgba(255,255,255,.5)!important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media print{*,::after,::before{text-shadow:none!important;box-shadow:none!important}a:not(.btn){text-decoration:underline}abbr[title]::after{content:" (" attr(title) ")"}pre{white-space:pre-wrap!important}blockquote,pre{border:1px solid #adb5bd;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}body{min-width:992px!important}.container{min-width:992px!important}.navbar{display:none}.badge{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #dee2e6!important}.table-dark{color:inherit}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#dee2e6}.table .thead-dark th{color:inherit;border-color:#dee2e6}} -/*# sourceMappingURL=bootstrap.min.css.map */ diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537227KB.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537227KB.py deleted file mode 100644 index 9bb1df1ee93d3af49725f60ac0b6052e057c6872..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537227KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/LaynzKunz/Model-RCV/config.py b/spaces/LaynzKunz/Model-RCV/config.py deleted file mode 100644 index 040a64d2c5ce4d7802bdf7f69321483b81008f08..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Model-RCV/config.py +++ /dev/null @@ -1,106 +0,0 @@ -import argparse -import torch -from multiprocessing import cpu_count - -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = 
None - ( - self.python_cmd, - self.listen_port, - self.colab, - self.noparallel, - self.noautoopen, - self.api - ) = self.arg_parse() - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - @staticmethod - def arg_parse() -> tuple: - parser = argparse.ArgumentParser() - parser.add_argument("--port", type=int, default=7865, help="Listen port") - parser.add_argument( - "--pycmd", type=str, default="python", help="Python command" - ) - parser.add_argument("--colab", action="store_true", help="Launch in colab") - parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" - ) - parser.add_argument( - "--noautoopen", - action="store_true", - help="Do not open in browser automatically", - ) - parser.add_argument("--api", action="store_true", help="Launch with api") - cmd_opts = parser.parse_args() - - cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865 - - return ( - cmd_opts.pycmd, - cmd_opts.port, - cmd_opts.colab, - cmd_opts.noparallel, - cmd_opts.noautoopen, - cmd_opts.api - ) - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - print("16-series/10-series GPUs and P40 are forced to single precision") - self.is_half = False - - else: - self.gpu_name = None - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - elif torch.backends.mps.is_available(): - print("No supported NVIDIA GPU found, using MPS for inference") - self.device = "mps" - self.is_half = False - else: - print("No supported NVIDIA GPU found, using CPU for inference") - self.device = "cpu" - self.is_half = False - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # Settings for 6 GB of VRAM - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # Settings for 5 GB of VRAM - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/psenet/README.md b/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/psenet/README.md deleted file mode 100644 index b4293a3ce823c5dd285fda86dbc47b41465129b3..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/psenet/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# PSENet - -> [Shape robust text detection with progressive scale expansion network](https://arxiv.org/abs/1903.12473) - - - -## Abstract - -Scene text detection has witnessed rapid progress especially with the recent development of convolutional neural networks. However, there still exist two challenges that prevent these algorithms from being deployed in industrial applications. On the one hand, most state-of-the-art algorithms require quadrangular bounding boxes, which are inaccurate for locating text with arbitrary shapes. On the other hand, two text instances that are close to each other may lead to a false detection that covers both instances. Traditionally, the segmentation-based approach can relieve the first problem but usually fails to solve the second challenge. To address these two challenges, in this paper, we propose a novel Progressive Scale Expansion Network (PSENet), which can precisely detect text instances with arbitrary shapes. 
More specifically, PSENet generates the different scale of kernels for each text instance, and gradually expands the minimal scale kernel to the text instance with the complete shape. Due to the fact that there are large geometrical margins among the minimal scale kernels, our method is effective to split the close text instances, making it easier to use segmentation-based methods to detect arbitrary-shaped text instances. Extensive experiments on CTW1500, Total-Text, ICDAR 2015 and ICDAR 2017 MLT validate the effectiveness of PSENet. Notably, on CTW1500, a dataset full of long curve texts, PSENet achieves a F-measure of 74.3% at 27 FPS, and our best F-measure (82.2%) outperforms state-of-art algorithms by 6.6%. The code will be released in the future. - -
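The expansion step is easy to picture in code: seed instance labels from the connected components of the smallest kernel, then grow every label breadth-first into each successively larger kernel map, letting the first instance to reach a contested pixel keep it. The sketch below is only an illustration of that idea, not the implementation shipped with these configs; it assumes `kernels` is a list of binary NumPy maps ordered from smallest to largest and uses `scipy.ndimage.label` for the seeding.

```python
from collections import deque

import numpy as np
from scipy.ndimage import label as connected_components


def progressive_scale_expansion(kernels):
    # Seed instance labels from the smallest predicted kernel.
    labels, num_instances = connected_components(kernels[0])
    for kernel in kernels[1:]:
        # Grow every labelled pixel breadth-first into the next larger kernel map.
        queue = deque(zip(*np.nonzero(labels)))
        while queue:
            y, x = queue.popleft()
            for dy, dx in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ny, nx = y + dy, x + dx
                inside = 0 <= ny < labels.shape[0] and 0 <= nx < labels.shape[1]
                if inside and kernel[ny, nx] and labels[ny, nx] == 0:
                    labels[ny, nx] = labels[y, x]  # first label to arrive keeps the pixel
                    queue.append((ny, nx))
    return labels, num_instances
```

Because the minimal kernels of neighbouring instances are separated by large geometric margins, the instance labels stay distinct even after the final expansion.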
- -## Results and models - -### CTW1500 - -| Method | Backbone | Extra Data | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :------------------------------------------------: | :------: | :--------: | :-----------: | :----------: | :-----: | :-------: | :-----------: | :-----------: | :-----------: | :--------------------------------------------------: | -| [PSENet-4s](configs/textdet/psenet/psenet_r50_fpnf_600e_ctw1500.py) | ResNet50 | - | CTW1500 Train | CTW1500 Test | 600 | 1280 | 0.728 (0.717) | 0.849 (0.852) | 0.784 (0.779) | [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_ctw1500_20210401-216fed50.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/psenet/20210401_215421.log.json) | - -### ICDAR2015 - -| Method | Backbone | Extra Data | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :----------------------------------: | :------: | :---------------------------------------: | :----------: | :-------: | :-----: | :-------: | :-----------: | :-----------: | :-----------: | :-------------------------------------: | -| [PSENet-4s](configs/textdet/psenet/psenet_r50_fpnf_600e_icdar2015.py) | ResNet50 | - | IC15 Train | IC15 Test | 600 | 2240 | 0.784 (0.753) | 0.831 (0.867) | 0.807 (0.806) | [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_icdar2015-c6131f0d.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/psenet/20210331_214145.log.json) | -| [PSENet-4s](configs/textdet/psenet/psenet_r50_fpnf_600e_icdar2015.py) | ResNet50 | pretrain on IC17 MLT [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_icdar2017_as_pretrain-3bd6056c.pth) | IC15 Train | IC15 Test | 600 | 2240 | 0.834 | 0.861 | 0.847 | [model](https://download.openmmlab.com/mmocr/textdet/psenet/psenet_r50_fpnf_600e_icdar2015_pretrain-eefd8fe6.pth) \| [log](<>) | - -```{note} -We've upgraded our IoU backend from `Polygon3` to `shapely`. There are some performance differences for some models due to the backends' different logics to handle invalid polygons (more info [here](https://github.com/open-mmlab/mmocr/issues/465)). **New evaluation result is presented in brackets** and new logs will be uploaded soon. 
-``` - -## Citation - -```bibtex -@inproceedings{wang2019shape, - title={Shape robust text detection with progressive scale expansion network}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Hou, Wenbo and Lu, Tong and Yu, Gang and Shao, Shuai}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={9336--9345}, - year={2019} -} -``` diff --git a/spaces/LuxOAI/ChatGpt-Web/app/components/new-chat.tsx b/spaces/LuxOAI/ChatGpt-Web/app/components/new-chat.tsx deleted file mode 100644 index 81858fb0218c56e7ebd24a76c73163d9f5605f6c..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/app/components/new-chat.tsx +++ /dev/null @@ -1,197 +0,0 @@ -import { useEffect, useRef, useState } from "react"; -import { Path, SlotID } from "../constant"; -import { IconButton } from "./button"; -import { EmojiAvatar } from "./emoji"; -import styles from "./new-chat.module.scss"; - -import LeftIcon from "../icons/left.svg"; -import LightningIcon from "../icons/lightning.svg"; -import EyeIcon from "../icons/eye.svg"; - -import { useLocation, useNavigate } from "react-router-dom"; -import { Mask, useMaskStore } from "../store/mask"; -import Locale from "../locales"; -import { useAppConfig, useChatStore } from "../store"; -import { MaskAvatar } from "./mask"; -import { useCommand } from "../command"; - -function getIntersectionArea(aRect: DOMRect, bRect: DOMRect) { - const xmin = Math.max(aRect.x, bRect.x); - const xmax = Math.min(aRect.x + aRect.width, bRect.x + bRect.width); - const ymin = Math.max(aRect.y, bRect.y); - const ymax = Math.min(aRect.y + aRect.height, bRect.y + bRect.height); - const width = xmax - xmin; - const height = ymax - ymin; - const intersectionArea = width < 0 || height < 0 ? 0 : width * height; - return intersectionArea; -} - -function MaskItem(props: { mask: Mask; onClick?: () => void }) { - const domRef = useRef(null); - - useEffect(() => { - const changeOpacity = () => { - const dom = domRef.current; - const parent = document.getElementById(SlotID.AppBody); - if (!parent || !dom) return; - - const domRect = dom.getBoundingClientRect(); - const parentRect = parent.getBoundingClientRect(); - const intersectionArea = getIntersectionArea(domRect, parentRect); - const domArea = domRect.width * domRect.height; - const ratio = intersectionArea / domArea; - const opacity = ratio > 0.9 ? 1 : 0.4; - dom.style.opacity = opacity.toString(); - }; - - setTimeout(changeOpacity, 30); - - window.addEventListener("resize", changeOpacity); - - return () => window.removeEventListener("resize", changeOpacity); - }, [domRef]); - - return ( -
- -
{props.mask.name}
-
- ); -} - -function useMaskGroup(masks: Mask[]) { - const [groups, setGroups] = useState([]); - - useEffect(() => { - const appBody = document.getElementById(SlotID.AppBody); - if (!appBody || masks.length === 0) return; - - const rect = appBody.getBoundingClientRect(); - const maxWidth = rect.width; - const maxHeight = rect.height * 0.6; - const maskItemWidth = 120; - const maskItemHeight = 50; - - const randomMask = () => masks[Math.floor(Math.random() * masks.length)]; - let maskIndex = 0; - const nextMask = () => masks[maskIndex++ % masks.length]; - - const rows = Math.ceil(maxHeight / maskItemHeight); - const cols = Math.ceil(maxWidth / maskItemWidth); - - const newGroups = new Array(rows) - .fill(0) - .map((_, _i) => - new Array(cols) - .fill(0) - .map((_, j) => (j < 1 || j > cols - 2 ? randomMask() : nextMask())), - ); - - setGroups(newGroups); - - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []); - - return groups; -} - -export function NewChat() { - const chatStore = useChatStore(); - const maskStore = useMaskStore(); - - const masks = maskStore.getAll(); - const groups = useMaskGroup(masks); - - const navigate = useNavigate(); - const config = useAppConfig(); - - const { state } = useLocation(); - - const startChat = (mask?: Mask) => { - chatStore.newSession(mask); - setTimeout(() => navigate(Path.Chat), 1); - }; - - useCommand({ - mask: (id) => { - try { - const mask = maskStore.get(parseInt(id)); - startChat(mask ?? undefined); - } catch { - console.error("[New Chat] failed to create chat from mask id=", id); - } - }, - }); - - return ( -
-
- } - text={Locale.NewChat.Return} - onClick={() => navigate(Path.Home)} - > - {!state?.fromHome && ( - { - if (confirm(Locale.NewChat.ConfirmNoShow)) { - startChat(); - config.update( - (config) => (config.dontShowMaskSplashScreen = true), - ); - } - }} - > - )} -
-
-
- -
-
- -
-
- -
-
- -
{Locale.NewChat.Title}
-
{Locale.NewChat.SubTitle}
- -
- startChat()} - icon={} - type="primary" - shadow - /> - - navigate(Path.Masks)} - icon={} - bordered - shadow - /> -
- -
- {groups.map((masks, i) => ( -
- {masks.map((mask, index) => ( - startChat(mask)} - /> - ))} -
- ))} -
-
- ); -} diff --git a/spaces/Mahiruoshi/BangDream-Bert-VITS2/bert/bert-base-japanese-v3/README.md b/spaces/Mahiruoshi/BangDream-Bert-VITS2/bert/bert-base-japanese-v3/README.md deleted file mode 100644 index c5b3456719f01801a2f29fef5faa8ee672391adf..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/BangDream-Bert-VITS2/bert/bert-base-japanese-v3/README.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -license: apache-2.0 -datasets: -- cc100 -- wikipedia -language: -- ja -widget: -- text: 東北大学で[MASK]の研究をしています。 ---- - -# BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102) - -This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language. - -This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by the WordPiece subword tokenization. -Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective. - -The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/). - -## Model architecture - -The model architecture is the same as the original BERT base model; 12 layers, 768 dimensions of hidden states, and 12 attention heads. - -## Training Data - -The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia. -For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023. -The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively. - -For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7). - -## Tokenization - -The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm. -The vocabulary size is 32768. - -We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization. - -## Training - -We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps. -For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once. - -For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/). - -## Licenses - -The pretrained models are distributed under the Apache License 2.0. - -## Acknowledgments - -This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program. 
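As a quick illustration of the tokenization and masked-language-modeling behaviour described above, the snippet below runs the model card's widget example through a fill-mask pipeline. It is a minimal sketch, assuming the checkpoint is loaded from this local `bert-base-japanese-v3` directory (a Hugging Face Hub identifier works the same way) and that `fugashi` and `unidic-lite` are installed for the MeCab step.

```python
from transformers import AutoModelForMaskedLM, AutoTokenizer, pipeline

# Assumed local path inside this repository; adjust as needed.
model_dir = "bert/bert-base-japanese-v3"

tokenizer = AutoTokenizer.from_pretrained(model_dir)  # requires fugashi + unidic-lite
model = AutoModelForMaskedLM.from_pretrained(model_dir)

fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)
for prediction in fill_mask("東北大学で[MASK]の研究をしています。"):
    print(prediction["token_str"], round(prediction["score"], 3))
```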
diff --git a/spaces/Marshalls/testmtd/models/util/distributions.py b/spaces/Marshalls/testmtd/models/util/distributions.py deleted file mode 100644 index 139597f9cb07c5d48bed18984ec4747f4b4f3438..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/models/util/distributions.py +++ /dev/null @@ -1,2 +0,0 @@ - - diff --git a/spaces/MathysL/AutoGPT4/autogpt/commands/web_playwright.py b/spaces/MathysL/AutoGPT4/autogpt/commands/web_playwright.py deleted file mode 100644 index 4e388ded203cefb5e24f9116f7fe5b8a94893413..0000000000000000000000000000000000000000 --- a/spaces/MathysL/AutoGPT4/autogpt/commands/web_playwright.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Web scraping commands using Playwright""" -from __future__ import annotations - -try: - from playwright.sync_api import sync_playwright -except ImportError: - print( - "Playwright not installed. Please install it with 'pip install playwright' to use." - ) -from bs4 import BeautifulSoup - -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks - - -def scrape_text(url: str) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - - except Exception as e: - text = f"Error: {str(e)}" - - finally: - browser.close() - - return text - - -def scrape_links(url: str) -> str | list[str]: - """Scrape links from a webpage - - Args: - url (str): The URL to scrape links from - - Returns: - Union[str, List[str]]: The scraped links - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - hyperlinks = extract_hyperlinks(soup, url) - formatted_links = format_hyperlinks(hyperlinks) - - except Exception as e: - formatted_links = f"Error: {str(e)}" - - finally: - browser.close() - - return formatted_links diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/exp/upernet_global_small/test.sh b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/exp/upernet_global_small/test.sh deleted file mode 100644 index d9a85e7a0d3b7c96b060f473d41254b37a382fcb..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/exp/upernet_global_small/test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -work_path=$(dirname $0) -PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=8 \ - tools/test.py ${work_path}/test_config_h32.py \ - ${work_path}/ckpt/latest.pth \ - --launcher pytorch \ - --eval mIoU \ - 2>&1 | tee -a ${work_path}/log.txt diff --git a/spaces/MirageML/point-e/app.py b/spaces/MirageML/point-e/app.py deleted file mode 100644 index faa7f3915c09be6c655a7dfc34628d510252845b..0000000000000000000000000000000000000000 --- a/spaces/MirageML/point-e/app.py +++ /dev/null @@ -1,296 +0,0 @@ -import os -import gradio as gr -from PIL import Image -import torch -import 
matplotlib.pyplot as plt -import imageio -import numpy as np -import argparse - -from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config -from point_e.diffusion.sampler import PointCloudSampler -from point_e.models.download import load_checkpoint -from point_e.models.configs import MODEL_CONFIGS, model_from_config -from point_e.util.plotting import plot_point_cloud -from point_e.util.pc_to_mesh import marching_cubes_mesh - -from diffusers import StableDiffusionPipeline - -import trimesh - - -state = "" -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -css = ''' - .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important} - .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important} - #component-4, #component-3, #component-10{min-height: 0} - .duplicate-button img{margin: 0} -''' - -def set_state(s): - print(s) - global state - state = s - -def get_state(): - return state - -def load_img2mesh_model(model_name): - set_state(f'Creating img2mesh model {model_name}...') - i2m_name = model_name - i2m_model = model_from_config(MODEL_CONFIGS[i2m_name], device) - i2m_model.eval() - base_diffusion_i2m = diffusion_from_config(DIFFUSION_CONFIGS[i2m_name]) - - set_state(f'Downloading img2mesh checkpoint {model_name}...') - i2m_model.load_state_dict(load_checkpoint(i2m_name, device)) - - return i2m_model, base_diffusion_i2m - - - -def get_sampler(model_name, txt2obj, guidance_scale): - if txt2obj: - set_state('Creating txt2mesh model...') - t2m_name = 'base40M-textvec' - t2m_model = model_from_config(MODEL_CONFIGS[t2m_name], device) - t2m_model.eval() - base_diffusion_t2m = diffusion_from_config(DIFFUSION_CONFIGS[t2m_name]) - - set_state('Downloading txt2mesh checkpoint...') - t2m_model.load_state_dict(load_checkpoint(t2m_name, device)) - else: - i2m_model, base_diffusion_i2m = load_img2mesh_model(model_name) - - set_state('Creating upsample model...') - upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device) - upsampler_model.eval() - upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample']) - - set_state('Downloading upsampler checkpoint...') - upsampler_model.load_state_dict(load_checkpoint('upsample', device)) - - return PointCloudSampler( - device=device, - models=[t2m_model if txt2obj else i2m_model, upsampler_model], - diffusions=[base_diffusion_t2m if txt2obj else base_diffusion_i2m, upsampler_diffusion], - num_points=[1024, 4096 - 1024], - aux_channels=['R', 'G', 'B'], - guidance_scale=[guidance_scale, 0.0 if txt2obj else guidance_scale], - model_kwargs_key_filter=('texts', '') if txt2obj else ("*",) - ) - -def generate_txt2img(prompt): - pipe = StableDiffusionPipeline.from_pretrained("point_e_model_cache/stable-diffusion-2-1", torch_dtype=torch.float16) - pipe = pipe.to("cuda") - image = pipe(prompt).images[0] - - return image - -def generate_3D(input, model_name='base1B', guidance_scale=3.0, grid_size=128): - set_state('Entered generate function...') - - # try: - # input = Image.fromarray(input) - # except: - # img = generate_txt2img(input) - # img.save('/tmp/img.png') - # input = Image.open('/tmp/img.png') - - if isinstance(input, Image.Image): - input = prepare_img(input) - - # if input is a string, it's a text prompt - sampler = get_sampler(model_name, txt2obj=True if isinstance(input, str) else False, guidance_scale=guidance_scale) - - # Produce a sample from the model. 
- set_state('Sampling...') - samples = None - kw_args = dict(texts=[input]) if isinstance(input, str) else dict(images=[input]) - for x in sampler.sample_batch_progressive(batch_size=1, model_kwargs=kw_args): - samples = x - - set_state('Converting to point cloud...') - pc = sampler.output_to_point_clouds(samples)[0] - - set_state('Converting to mesh...') - save_ply(pc, '/tmp/mesh.ply', grid_size) - - set_state('') - - return ply_to_glb('/tmp/mesh.ply', '/tmp/mesh.glb'), create_gif(pc), gr.update(value=['/tmp/mesh.glb', '/tmp/mesh.ply'], visible=True) - -def prepare_img(img): - - w, h = img.size - if w > h: - img = img.crop((w - h) / 2, 0, w - (w - h) / 2, h) - else: - img = img.crop((0, (h - w) / 2, w, h - (h - w) / 2)) - - # resize to 256x256 - img = img.resize((256, 256)) - - return img - - -def ply_to_glb(ply_file, glb_file): - mesh = trimesh.load(ply_file) - - # Save the mesh as a glb file using Trimesh - mesh.export(glb_file, file_type='glb') - - return glb_file - -def save_ply(pc, file_name, grid_size): - set_state('Creating SDF model...') - sdf_name = 'sdf' - sdf_model = model_from_config(MODEL_CONFIGS[sdf_name], device) - sdf_model.eval() - - set_state('Loading SDF model...') - sdf_model.load_state_dict(load_checkpoint(sdf_name, device)) - - # Produce a mesh (with vertex colors) - mesh = marching_cubes_mesh( - pc=pc, - model=sdf_model, - batch_size=4096, - grid_size=grid_size, # increase to 128 for resolution used in evals - progress=True, - ) - - # Write the mesh to a PLY file to import into some other program. - with open(file_name, 'wb') as f: - mesh.write_ply(f) - -def create_gif(pc): - fig = plt.figure(facecolor='black', figsize=(4, 4)) - ax = fig.add_subplot(111, projection='3d', facecolor='black') - fixed_bounds=((-0.75, -0.75, -0.75),(0.75, 0.75, 0.75)) - - # Create an empty list to store the frames - frames = [] - - # Create a loop to generate the frames for the GIF - for angle in range(0, 360, 4): - # Clear the plot and plot the point cloud - ax.clear() - color_args = np.stack( - [pc.channels["R"], pc.channels["G"], pc.channels["B"]], axis=-1 - ) - c = pc.coords - - - ax.scatter(c[:, 0], c[:, 1], c[:, 2], c=color_args) - - # Set the viewpoint for the plot - ax.view_init(elev=10, azim=angle) - - # Turn off the axis labels and ticks - ax.axis('off') - ax.set_xlim3d(fixed_bounds[0][0], fixed_bounds[1][0]) - ax.set_ylim3d(fixed_bounds[0][1], fixed_bounds[1][1]) - ax.set_zlim3d(fixed_bounds[0][2], fixed_bounds[1][2]) - - # Draw the figure to update the image data - fig.canvas.draw() - - # Save the plot as a frame for the GIF - frame = np.array(fig.canvas.renderer.buffer_rgba()) - w, h = frame.shape[0], frame.shape[1] - i = int(round((h - int(h*0.6)) / 2.)) - frame = frame[i:i + int(h*0.6),i:i + int(h*0.6)] - frames.append(frame) - - # Save the GIF using imageio - imageio.mimsave('/tmp/pointcloud.mp4', frames, fps=30) - return '/tmp/pointcloud.mp4' - -block = gr.Blocks().queue(max_size=250, concurrency_count=6) -with block: - with gr.Box(): - if(not torch.cuda.is_available()): - top_description = gr.HTML(f''' -
-
- -
-

- Point-E Web UI -

-
- Duplicate Space -
-
-

- Generate 3D Assets in 2 minutes with a prompt or image! - Based on the Point-E implementation -

-
-

There's only one step left before you can use this Space: attribute a T4 GPU to it (via the Settings tab) and run the generation below. Other GPUs are not compatible for now. You will be billed by the minute from when you activate the GPU until it is turned off.

-
- ''') - else: - top_description = gr.HTML(f''' -
-
- -
-

- Point-E Web UI -

-
- Duplicate Space -
-
-

- Generate 3D Assets in 2 minutes with a prompt or image! - Based on the Point-E implementation -

-
- ''') - with gr.Row(): - with gr.Column(): - with gr.Tab("Image to 3D"): - gr.Markdown("Best results with images of objects on an empty background.") - input_image = gr.Image(label="Image") - img_button = gr.Button(label="Generate") - - with gr.Tab("Text to 3D"): - gr.Markdown("Uses Stable Diffusion to create an image from the prompt.") - prompt = gr.Textbox(label="Prompt", placeholder="A HD photo of a Corgi") - text_button = gr.Button(label="Generate") - - with gr.Accordion("Advanced options", open=False): - model = gr.Radio(["base40M", "base300M", "base1B"], label="Model", value="base1B") - scale = gr.Slider( - label="Guidance Scale", minimum=1.0, maximum=10.0, value=3.0, step=0.1 - ) - - with gr.Column(): - model_gif = gr.Video(label="3D Model GIF") - # btn_pc_to_obj = gr.Button(value="Convert to OBJ", visible=False) - model_3d = gr.Model3D(value=None) - file_out = gr.File(label="Files", visible=False) - - if torch.cuda.is_available(): - gr.Examples( - examples=[ - ["images/pumpkin.png"], - ["images/fantasy_world.png"], - ], - inputs=[input_image], - outputs=[model_3d, model_gif, file_out], - fn=generate_3D, - cache_examples=True - ) - - img_button.click(fn=generate_3D, inputs=[input_image, model, scale], outputs=[model_3d, model_gif, file_out]) - text_button.click(fn=generate_3D, inputs=[prompt, model, scale], outputs=[model_3d, model_gif, file_out]) - -block.launch(show_api=False) diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/master/master_resnet31_12e_st_mj_sa.py b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/master/master_resnet31_12e_st_mj_sa.py deleted file mode 100644 index 7ab66ab747ed3fa946cfc09fdbf479f97710909a..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/master/master_resnet31_12e_st_mj_sa.py +++ /dev/null @@ -1,61 +0,0 @@ -_base_ = [ - '../_base_/datasets/mjsynth.py', - '../_base_/datasets/synthtext.py', - '../_base_/datasets/synthtext_add.py', - '../_base_/datasets/cute80.py', - '../_base_/datasets/iiit5k.py', - '../_base_/datasets/svt.py', - '../_base_/datasets/svtp.py', - '../_base_/datasets/icdar2013.py', - '../_base_/datasets/icdar2015.py', - '../_base_/default_runtime.py', - '../_base_/schedules/schedule_adam_base.py', - '_base_master_resnet31.py', -] - -optim_wrapper = dict(optimizer=dict(lr=4e-4)) -train_cfg = dict(max_epochs=12) -# learning policy -param_scheduler = [ - dict(type='LinearLR', end=100, by_epoch=False), - dict(type='MultiStepLR', milestones=[11], end=12), -] - -# dataset settings -train_list = [ - _base_.mjsynth_textrecog_train, _base_.synthtext_textrecog_train, - _base_.synthtext_add_textrecog_train -] -test_list = [ - _base_.cute80_textrecog_test, _base_.iiit5k_textrecog_test, - _base_.svt_textrecog_test, _base_.svtp_textrecog_test, - _base_.icdar2013_textrecog_test, _base_.icdar2015_textrecog_test -] - -train_dataset = dict( - type='ConcatDataset', datasets=train_list, pipeline=_base_.train_pipeline) -test_dataset = dict( - type='ConcatDataset', datasets=test_list, pipeline=_base_.test_pipeline) - -train_dataloader = dict( - batch_size=512, - num_workers=24, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=train_dataset) - -test_dataloader = dict( - batch_size=1, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=test_dataset) - -val_dataloader = test_dataloader - -val_evaluator = dict( - dataset_prefixes=['CUTE80', 'IIIT5K', 'SVT', 'SVTP', 'IC13', 
'IC15']) -test_evaluator = val_evaluator - -auto_scale_lr = dict(base_batch_size=512 * 4) diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/backbones/__init__.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/backbones/__init__.py deleted file mode 100644 index 053ed524657ebf335ea622776687291931df2358..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/backbones/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .clip_resnet import CLIPResNet -from .unet import UNet -from .vit import VisionTransformer, VisionTransformer_LoRA -__all__ = ['UNet', 'CLIPResNet', 'VisionTransformer', 'VisionTransformer_LoRA'] diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/data/dataloader.py b/spaces/NAACL2022/CLIP-Caption-Reward/captioning/data/dataloader.py deleted file mode 100644 index 7f2ed0304bd94db21bbc9fbdc6857beccb8bb621..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/data/dataloader.py +++ /dev/null @@ -1,425 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import json -import h5py -from lmdbdict import lmdbdict -from lmdbdict.methods import DUMPS_FUNC, LOADS_FUNC -import os -import numpy as np -import numpy.random as npr -import random -from functools import partial - -import torch -import torch.utils.data as data - -import multiprocessing -import six - -class HybridLoader: - """ - If db_path is a director, then use normal file loading - If lmdb, then load from lmdb - The loading method depend on extention. - - in_memory: if in_memory is True, we save all the features in memory - For individual np(y|z)s, we don't need to do that because the system will do this for us. - Should be useful for lmdb or h5. - (Copied this idea from vilbert) - """ - def __init__(self, db_path, ext, in_memory=False): - self.db_path = db_path - self.ext = ext - if self.ext == '.npy': - self.loader = lambda x: np.load(six.BytesIO(x)) - else: - def load_npz(x): - x = np.load(six.BytesIO(x)) - return x['feat'] if 'feat' in x else x['z'] # normally it should be 'feat', but under cocotest_bu, the key is saved to be 'z' mistakenly. 
- self.loader = load_npz - if db_path.endswith('.lmdb'): - self.db_type = 'lmdb' - self.lmdb = lmdbdict(db_path, unsafe=True) - self.lmdb._key_dumps = DUMPS_FUNC['ascii'] - self.lmdb._value_loads = LOADS_FUNC['identity'] - elif db_path.endswith('.pth'): # Assume a key,value dictionary - self.db_type = 'pth' - self.feat_file = torch.load(db_path) - self.loader = lambda x: x - print('HybridLoader: ext is ignored') - elif db_path.endswith('h5'): - self.db_type = 'h5' - self.loader = lambda x: np.array(x).astype('float32') - else: - self.db_type = 'dir' - - self.in_memory = in_memory - if self.in_memory: - self.features = {} - - def get(self, key): - - if self.in_memory and key in self.features: - # We save f_input because we want to save the - # compressed bytes to save memory - f_input = self.features[key] - elif self.db_type == 'lmdb': - f_input = self.lmdb[key] - elif self.db_type == 'pth': - f_input = self.feat_file[key] - elif self.db_type == 'h5': - f_input = h5py.File(self.db_path, 'r')[key] - else: - f_input = open(os.path.join(self.db_path, key + self.ext), 'rb').read() - - if self.in_memory and key not in self.features: - self.features[key] = f_input - - # load image - feat = self.loader(f_input) - - return feat - -class Dataset(data.Dataset): - - def get_vocab_size(self): - return self.vocab_size - - def get_vocab(self): - return self.ix_to_word - - def get_seq_length(self): - return self.seq_length - - def __init__(self, opt): - self.opt = opt - self.seq_per_img = opt.seq_per_img - - # feature related options - self.use_fc = getattr(opt, 'use_fc', True) - self.use_att = getattr(opt, 'use_att', True) - self.use_box = getattr(opt, 'use_box', 0) - self.norm_att_feat = getattr(opt, 'norm_att_feat', 0) - self.norm_box_feat = getattr(opt, 'norm_box_feat', 0) - - # load the json file which contains additional information about the dataset - print('DataLoader loading json file: ', opt.input_json) - self.info = json.load(open(self.opt.input_json)) - if 'ix_to_word' in self.info: - self.ix_to_word = self.info['ix_to_word'] - self.vocab_size = len(self.ix_to_word) - print('vocab size is ', self.vocab_size) - - # open the hdf5 file - print('DataLoader loading h5 file: ', opt.input_fc_dir, opt.input_att_dir, opt.input_box_dir, opt.input_label_h5) - """ - Setting input_label_h5 to none is used when only doing generation. - For example, when you need to test on coco test set. 
- """ - if self.opt.input_label_h5 != 'none': - self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core') - # load in the sequence data - seq_size = self.h5_label_file['labels'].shape - self.label = self.h5_label_file['labels'][:] - self.seq_length = seq_size[1] - print('max sequence length in data is', self.seq_length) - # load the pointers in full to RAM (should be small enough) - self.label_start_ix = self.h5_label_file['label_start_ix'][:] - self.label_end_ix = self.h5_label_file['label_end_ix'][:] - else: - self.seq_length = 1 - - self.data_in_memory = getattr(opt, 'data_in_memory', False) - self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy', in_memory=self.data_in_memory) - self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz', in_memory=self.data_in_memory) - self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy', in_memory=self.data_in_memory) - - self.num_images = len(self.info['images']) # self.label_start_ix.shape[0] - print('read %d image features' %(self.num_images)) - - # separate out indexes for each of the provided splits - self.split_ix = {'train': [], 'val': [], 'test': []} - for ix in range(len(self.info['images'])): - img = self.info['images'][ix] - if not 'split' in img: - self.split_ix['train'].append(ix) - self.split_ix['val'].append(ix) - self.split_ix['test'].append(ix) - elif img['split'] == 'train': - self.split_ix['train'].append(ix) - elif img['split'] == 'val': - self.split_ix['val'].append(ix) - elif img['split'] == 'test': - self.split_ix['test'].append(ix) - elif opt.train_only == 0: # restval - self.split_ix['train'].append(ix) - - print('assigned %d images to split train' %len(self.split_ix['train'])) - print('assigned %d images to split val' %len(self.split_ix['val'])) - print('assigned %d images to split test' %len(self.split_ix['test'])) - - def get_captions(self, ix, seq_per_img): - # fetch the sequence labels - ix1 = self.label_start_ix[ix] - 1 #label_start_ix starts from 1 - ix2 = self.label_end_ix[ix] - 1 - ncap = ix2 - ix1 + 1 # number of captions available for this image - assert ncap > 0, 'an image does not have any label. 
this can be handled but right now isn\'t' - - if ncap < seq_per_img: - # we need to subsample (with replacement) - seq = np.zeros([seq_per_img, self.seq_length], dtype = 'int') - for q in range(seq_per_img): - ixl = random.randint(ix1,ix2) - seq[q, :] = self.label[ixl, :self.seq_length] - else: - ixl = random.randint(ix1, ix2 - seq_per_img + 1) - seq = self.label[ixl: ixl + seq_per_img, :self.seq_length] - - return seq - - def collate_func(self, batch, split): - seq_per_img = self.seq_per_img - - fc_batch = [] - att_batch = [] - label_batch = [] - - wrapped = False - - infos = [] - gts = [] - - for sample in batch: - # fetch image - tmp_fc, tmp_att, tmp_seq, \ - ix, it_pos_now, tmp_wrapped = sample - if tmp_wrapped: - wrapped = True - - fc_batch.append(tmp_fc) - att_batch.append(tmp_att) - - tmp_label = np.zeros([seq_per_img, self.seq_length + 2], dtype = 'int') - if hasattr(self, 'h5_label_file'): - # if there is ground truth - tmp_label[:, 1 : self.seq_length + 1] = tmp_seq - label_batch.append(tmp_label) - - # Used for reward evaluation - if hasattr(self, 'h5_label_file'): - # if there is ground truth - gts.append(self.label[self.label_start_ix[ix] - 1: self.label_end_ix[ix]]) - else: - gts.append([]) - - # record associated info as well - info_dict = {} - info_dict['ix'] = ix - info_dict['id'] = self.info['images'][ix]['id'] - info_dict['file_path'] = self.info['images'][ix].get('file_path', '') - infos.append(info_dict) - - # #sort by att_feat length - # fc_batch, att_batch, label_batch, gts, infos = \ - # zip(*sorted(zip(fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos), key=lambda x: len(x[1]), reverse=True)) - fc_batch, att_batch, label_batch, gts, infos = \ - zip(*sorted(zip(fc_batch, att_batch, label_batch, gts, infos), key=lambda x: 0, reverse=True)) - data = {} - data['fc_feats'] = np.stack(fc_batch) - # merge att_feats - max_att_len = max([_.shape[0] for _ in att_batch]) - data['att_feats'] = np.zeros([len(att_batch), max_att_len, att_batch[0].shape[1]], dtype = 'float32') - for i in range(len(att_batch)): - data['att_feats'][i, :att_batch[i].shape[0]] = att_batch[i] - data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32') - for i in range(len(att_batch)): - data['att_masks'][i, :att_batch[i].shape[0]] = 1 - # set att_masks to None if attention features have same length - if data['att_masks'].sum() == data['att_masks'].size: - data['att_masks'] = None - - data['labels'] = np.vstack(label_batch) - # generate mask - nonzeros = np.array(list(map(lambda x: (x != 0).sum()+2, data['labels']))) - mask_batch = np.zeros([data['labels'].shape[0], self.seq_length + 2], dtype = 'float32') - for ix, row in enumerate(mask_batch): - row[:nonzeros[ix]] = 1 - data['masks'] = mask_batch - data['labels'] = data['labels'].reshape(len(batch), seq_per_img, -1) - data['masks'] = data['masks'].reshape(len(batch), seq_per_img, -1) - - data['gts'] = gts # all ground truth captions of each images - data['bounds'] = {'it_pos_now': it_pos_now, # the it_pos_now of the last sample - 'it_max': len(self.split_ix[split]), 'wrapped': wrapped} - data['infos'] = infos - - data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarray to torch tensor - - return data - - def __getitem__(self, index): - """This function returns a tuple that is further passed to collate_fn - """ - ix, it_pos_now, wrapped = index #self.split_ix[index] - if self.use_att: - att_feat = self.att_loader.get(str(self.info['images'][ix]['id'])) - # Reshape to K 
x C - att_feat = att_feat.reshape(-1, att_feat.shape[-1]) - if self.norm_att_feat: - att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True) - if self.use_box: - box_feat = self.box_loader.get(str(self.info['images'][ix]['id'])) - # devided by image width and height - x1,y1,x2,y2 = np.hsplit(box_feat, 4) - h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width'] - box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1?? - if self.norm_box_feat: - box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True) - att_feat = np.hstack([att_feat, box_feat]) - # sort the features by the size of boxes - att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True)) - else: - att_feat = np.zeros((0,0), dtype='float32') - if self.use_fc: - try: - fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id'])) - except: - # Use average of attention when there is no fc provided (For bottomup feature) - fc_feat = att_feat.mean(0) - else: - fc_feat = np.zeros((0), dtype='float32') - if hasattr(self, 'h5_label_file'): - seq = self.get_captions(ix, self.seq_per_img) - else: - seq = None - return (fc_feat, - att_feat, seq, - ix, it_pos_now, wrapped) - - def __len__(self): - return len(self.info['images']) - -class DataLoader: - def __init__(self, opt): - self.opt = opt - self.batch_size = self.opt.batch_size - self.dataset = Dataset(opt) - - # Initialize loaders and iters - self.loaders, self.iters = {}, {} - for split in ['train', 'val', 'test']: - if split == 'train': - sampler = MySampler(self.dataset.split_ix[split], shuffle=True, wrap=True) - else: - sampler = MySampler(self.dataset.split_ix[split], shuffle=False, wrap=False) - self.loaders[split] = data.DataLoader(dataset=self.dataset, - batch_size=self.batch_size, - sampler=sampler, - pin_memory=True, - num_workers=4, # 4 is usually enough - collate_fn=partial(self.dataset.collate_func, split=split), - drop_last=False) - self.iters[split] = iter(self.loaders[split]) - - def get_batch(self, split): - try: - data = next(self.iters[split]) - except StopIteration: - self.iters[split] = iter(self.loaders[split]) - data = next(self.iters[split]) - return data - - def reset_iterator(self, split): - self.loaders[split].sampler._reset_iter() - self.iters[split] = iter(self.loaders[split]) - - def get_vocab_size(self): - return self.dataset.get_vocab_size() - - @property - def vocab_size(self): - return self.get_vocab_size() - - def get_vocab(self): - return self.dataset.get_vocab() - - def get_seq_length(self): - return self.dataset.get_seq_length() - - @property - def seq_length(self): - return self.get_seq_length() - - def state_dict(self): - def get_prefetch_num(split): - if self.loaders[split].num_workers > 0: - return (self.iters[split]._send_idx - self.iters[split]._rcvd_idx) * self.batch_size - else: - return 0 - return {split: loader.sampler.state_dict(get_prefetch_num(split)) \ - for split, loader in self.loaders.items()} - - def load_state_dict(self, state_dict=None): - if state_dict is None: - return - for split in self.loaders.keys(): - self.loaders[split].sampler.load_state_dict(state_dict[split]) - - -class MySampler(data.sampler.Sampler): - def __init__(self, index_list, shuffle, wrap): - self.index_list = index_list - self.shuffle = shuffle - self.wrap = wrap - # if wrap, there will be not stop iteration called - # wrap True used during training, and wrap False used during test. 
- self._reset_iter() - - def __iter__(self): - return self - - def __next__(self): - wrapped = False - if self.iter_counter == len(self._index_list): - self._reset_iter() - if self.wrap: - wrapped = True - else: - raise StopIteration() - if len(self._index_list) == 0: # overflow when 0 samples - return None - elem = (self._index_list[self.iter_counter], self.iter_counter+1, wrapped) - self.iter_counter += 1 - return elem - - def next(self): - return self.__next__() - - def _reset_iter(self): - if self.shuffle: - rand_perm = npr.permutation(len(self.index_list)) - self._index_list = [self.index_list[_] for _ in rand_perm] - else: - self._index_list = self.index_list - - self.iter_counter = 0 - - def __len__(self): - return len(self.index_list) - - def load_state_dict(self, state_dict=None): - if state_dict is None: - return - self._index_list = state_dict['index_list'] - self.iter_counter = state_dict['iter_counter'] - - def state_dict(self, prefetched_num=None): - prefetched_num = prefetched_num or 0 - return { - 'index_list': self._index_list, - 'iter_counter': self.iter_counter - prefetched_num - } - - \ No newline at end of file diff --git a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/test_tasks.py b/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/test_tasks.py deleted file mode 100644 index fb07a12653ebad6b38dc3786e749d3e8bf2b2072..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/test_tasks.py +++ /dev/null @@ -1,127 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tasks that test correctness of algorithms.""" - -from six.moves import xrange -from common import reward as reward_lib # brain coder -from single_task import misc # brain coder - - -class BasicTaskManager(object): - """Wraps a generic reward function.""" - - def __init__(self, reward_fn): - self.reward_fn = reward_fn - self.good_reward = 1.0 - - def _score_string(self, string): - actions = misc.bf_string_to_tokens(string) - reward, correct = self.reward_fn(actions) - return misc.RewardInfo( - episode_rewards=[0.0] * (len(string) - 1) + [reward], - input_case=None, - correct_output=None, - code_output=actions, - input_type=None, - output_type=misc.IOType.integer, - reason='correct' if correct else 'wrong') - - def rl_batch(self, batch_size): - reward_fns = [self._score_string] * batch_size - return reward_fns - - -class Trie(object): - """Trie for sequences.""" - EOS = () - - def __init__(self): - self.trie = {} - - def insert(self, sequence): - d = self.trie - for e in sequence: - if e not in d: - d[e] = {} - d = d[e] - d[self.EOS] = True # Terminate sequence. - - def prefix_match(self, sequence): - """Return prefix of `sequence` which exists in the trie.""" - d = self.trie - index = 0 - for i, e in enumerate(sequence + [self.EOS]): - index = i - if e in d: - d = d[e] - if e == self.EOS: - return sequence, True - else: - break - return sequence[:index], False - - def next_choices(self, sequence): - d = self.trie - for e in sequence: - if e in d: - d = d[e] - else: - raise ValueError('Sequence not a prefix: %s' % (sequence,)) - return d.keys() - - -class HillClimbingTask(object): - """Simple task that tests reward hill climbing ability. - - There are a set of paths (sequences of tokens) which are rewarded. The total - reward for a path is proportional to its length, so the longest path is the - target. Shorter paths can be dead ends. 
- """ - - def __init__(self): - # Paths are sequences of sub-sequences. Here we form unique sub-sequences - # out of 3 arbitrary ints. We use sub-sequences instead of single entities - # to make the task harder by making the episodes last longer, i.e. more - # for the agent to remember. - a = (1, 2, 3) - b = (4, 5, 6) - c = (7, 8, 7) - d = (6, 5, 4) - e = (3, 2, 1) - f = (8, 5, 1) - g = (6, 4, 2) - h = (1, 8, 3) - self.paths = Trie() - self.paths.insert([a, b, h]) - self.paths.insert([a, b, c, d, e, f, g, h]) - self.paths.insert([a, b, c, d, e, b, a]) - self.paths.insert([a, b, g, h]) - self.paths.insert([a, e, f, g]) - self.correct_sequence = misc.flatten([a, b, c, d, e, f, g, h]) - - def distance_fn(a, b): - len_diff = abs(len(a) - len(b)) - return sum(reward_lib.mod_abs_diff(ai - 1, bi - 1, 8) - for ai, bi in zip(a, b)) + len_diff * 4 # 8 / 2 = 4 - self.distance_fn = distance_fn - - def __call__(self, actions): - # Compute reward for action sequence. - actions = [a for a in actions if a > 0] - sequence = [tuple(actions[i: i + 3]) for i in xrange(0, len(actions), 3)] - prefix, complete = self.paths.prefix_match(sequence) - if complete: - return float(len(prefix)), actions == self.correct_sequence - if len(prefix) == len(sequence): - return float(len(prefix)), False - next_pred = sequence[len(prefix)] - choices = self.paths.next_choices(prefix) - if choices == [()]: - return (len(prefix) - len(next_pred) / 3.0), False - min_dist = min(self.distance_fn(c, next_pred) for c in choices) - # +1 reward for each element in the sequence correct, plus fraction torwards - # closest next element. - # Maximum distance possible is num_actions * base / 2 = 3 * 8 / 2 = 12 - return (len(prefix) + (1 - min_dist / 12.0)), False diff --git a/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/src/utils.py b/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/src/utils.py deleted file mode 100644 index a1b9e44260b7c7884855761f56ac60d6f508c2fb..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/src/utils.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Generaly Utilities. -""" - -import numpy as np, cPickle, os, time -from six.moves import xrange -import src.file_utils as fu -import logging - -class Timer(): - def __init__(self): - self.calls = 0. - self.start_time = 0. - self.time_per_call = 0. - self.total_time = 0. - self.last_log_time = 0. - - def tic(self): - self.start_time = time.time() - - def toc(self, average=True, log_at=-1, log_str='', type='calls'): - if self.start_time == 0: - logging.error('Timer not started by calling tic().') - t = time.time() - diff = time.time() - self.start_time - self.total_time += diff - self.calls += 1. 
- self.time_per_call = self.total_time/self.calls - - if type == 'calls' and log_at > 0 and np.mod(self.calls, log_at) == 0: - _ = [] - logging.info('%s: %f seconds.', log_str, self.time_per_call) - elif type == 'time' and log_at > 0 and t - self.last_log_time >= log_at: - _ = [] - logging.info('%s: %f seconds.', log_str, self.time_per_call) - self.last_log_time = t - - if average: - return self.time_per_call - else: - return diff - -class Foo(object): - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - def __str__(self): - str_ = '' - for v in vars(self).keys(): - a = getattr(self, v) - if True: #isinstance(v, object): - str__ = str(a) - str__ = str__.replace('\n', '\n ') - else: - str__ = str(a) - str_ += '{:s}: {:s}'.format(v, str__) - str_ += '\n' - return str_ - - -def dict_equal(dict1, dict2): - assert(set(dict1.keys()) == set(dict2.keys())), "Sets of keys between 2 dictionaries are different." - for k in dict1.keys(): - assert(type(dict1[k]) == type(dict2[k])), "Type of key '{:s}' if different.".format(k) - if type(dict1[k]) == np.ndarray: - assert(dict1[k].dtype == dict2[k].dtype), "Numpy Type of key '{:s}' if different.".format(k) - assert(np.allclose(dict1[k], dict2[k])), "Value for key '{:s}' do not match.".format(k) - else: - assert(dict1[k] == dict2[k]), "Value for key '{:s}' do not match.".format(k) - return True - -def subplot(plt, Y_X, sz_y_sz_x = (10, 10)): - Y,X = Y_X - sz_y, sz_x = sz_y_sz_x - plt.rcParams['figure.figsize'] = (X*sz_x, Y*sz_y) - fig, axes = plt.subplots(Y, X) - plt.subplots_adjust(wspace=0.1, hspace=0.1) - return fig, axes - -def tic_toc_print(interval, string): - global tic_toc_print_time_old - if 'tic_toc_print_time_old' not in globals(): - tic_toc_print_time_old = time.time() - print(string) - else: - new_time = time.time() - if new_time - tic_toc_print_time_old > interval: - tic_toc_print_time_old = new_time; - print(string) - -def mkdir_if_missing(output_dir): - if not fu.exists(output_dir): - fu.makedirs(output_dir) - -def save_variables(pickle_file_name, var, info, overwrite = False): - if fu.exists(pickle_file_name) and overwrite == False: - raise Exception('{:s} exists and over write is false.'.format(pickle_file_name)) - # Construct the dictionary - assert(type(var) == list); assert(type(info) == list); - d = {} - for i in xrange(len(var)): - d[info[i]] = var[i] - with fu.fopen(pickle_file_name, 'w') as f: - cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL) - -def load_variables(pickle_file_name): - if fu.exists(pickle_file_name): - with fu.fopen(pickle_file_name, 'r') as f: - d = cPickle.load(f) - return d - else: - raise Exception('{:s} does not exists.'.format(pickle_file_name)) - -def voc_ap(rec, prec): - rec = rec.reshape((-1,1)) - prec = prec.reshape((-1,1)) - z = np.zeros((1,1)) - o = np.ones((1,1)) - mrec = np.vstack((z, rec, o)) - mpre = np.vstack((z, prec, z)) - for i in range(len(mpre)-2, -1, -1): - mpre[i] = max(mpre[i], mpre[i+1]) - - I = np.where(mrec[1:] != mrec[0:-1])[0]+1; - ap = 0; - for i in I: - ap = ap + (mrec[i] - mrec[i-1])*mpre[i]; - return ap - -def tight_imshow_figure(plt, figsize=None): - fig = plt.figure(figsize=figsize) - ax = plt.Axes(fig, [0,0,1,1]) - ax.set_axis_off() - fig.add_axes(ax) - return fig, ax - -def calc_pr(gt, out, wt=None): - if wt is None: - wt = np.ones((gt.size,1)) - - gt = gt.astype(np.float64).reshape((-1,1)) - wt = wt.astype(np.float64).reshape((-1,1)) - out = out.astype(np.float64).reshape((-1,1)) - - gt = gt*wt - tog = np.concatenate([gt, wt, out], axis=1)*1. 
- ind = np.argsort(tog[:,2], axis=0)[::-1] - tog = tog[ind,:] - cumsumsortgt = np.cumsum(tog[:,0]) - cumsumsortwt = np.cumsum(tog[:,1]) - prec = cumsumsortgt / cumsumsortwt - rec = cumsumsortgt / np.sum(tog[:,0]) - - ap = voc_ap(rec, prec) - return ap, rec, prec diff --git a/spaces/NN520/AI/src/components/ui/button.tsx b/spaces/NN520/AI/src/components/ui/button.tsx deleted file mode 100644 index 281da005124fa94c89a9a9db7605748a92b60865..0000000000000000000000000000000000000000 --- a/spaces/NN520/AI/src/components/ui/button.tsx +++ /dev/null @@ -1,57 +0,0 @@ -import * as React from 'react' -import { Slot } from '@radix-ui/react-slot' -import { cva, type VariantProps } from 'class-variance-authority' - -import { cn } from '@/lib/utils' - -const buttonVariants = cva( - 'inline-flex items-center justify-center rounded-md text-sm font-medium shadow ring-offset-background transition-colors outline-none disabled:pointer-events-none disabled:opacity-50', - { - variants: { - variant: { - default: - 'bg-primary text-primary-foreground shadow-md hover:bg-primary/90', - destructive: - 'bg-destructive text-destructive-foreground hover:bg-destructive/90', - outline: - 'border border-input hover:bg-accent hover:text-accent-foreground', - secondary: - 'bg-secondary text-secondary-foreground hover:bg-secondary/80', - ghost: 'shadow-none hover:bg-accent hover:text-accent-foreground', - link: 'text-primary underline-offset-4 shadow-none hover:underline' - }, - size: { - default: 'h-8 px-4 py-2', - sm: 'h-8 rounded-md px-3', - lg: 'h-11 rounded-md px-8', - icon: 'h-8 w-8 p-0' - } - }, - defaultVariants: { - variant: 'default', - size: 'default' - } - } -) - -export interface ButtonProps - extends React.ButtonHTMLAttributes, - VariantProps { - asChild?: boolean -} - -const Button = React.forwardRef( - ({ className, variant, size, asChild = false, ...props }, ref) => { - const Comp = asChild ? 
Slot : 'button' - return ( - - ) - } -) -Button.displayName = 'Button' - -export { Button, buttonVariants } diff --git a/spaces/NN520/AI/src/lib/hooks/use-at-bottom.tsx b/spaces/NN520/AI/src/lib/hooks/use-at-bottom.tsx deleted file mode 100644 index d37c8cf4162adcb0064e08ecec24eb731416b045..0000000000000000000000000000000000000000 --- a/spaces/NN520/AI/src/lib/hooks/use-at-bottom.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export function useAtBottom(offset = 0) { - const [isAtBottom, setIsAtBottom] = React.useState(false) - - React.useEffect(() => { - const handleScroll = () => { - setIsAtBottom( - window.innerHeight + window.scrollY >= - document.body.offsetHeight - offset - ) - } - - window.addEventListener('scroll', handleScroll, { passive: true }) - handleScroll() - - return () => { - window.removeEventListener('scroll', handleScroll) - } - }, [offset]) - - return isAtBottom -} diff --git a/spaces/Ngadou/Social_Engineering_Detection/README.md b/spaces/Ngadou/Social_Engineering_Detection/README.md deleted file mode 100644 index 24ebabda0c0f5f2303685940c015394a74ee88a9..0000000000000000000000000000000000000000 --- a/spaces/Ngadou/Social_Engineering_Detection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Social Engineering Detection -emoji: 💻 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -license: cc-by-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/pdb.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/pdb.py deleted file mode 100644 index 1ba6ef0d336b30717cfdde94e1b838cfe2bfeb20..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/pdb.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import multiprocessing -import os -import pdb -import sys - - -__all__ = ["set_trace"] - - -_stdin = [None] -_stdin_lock = multiprocessing.Lock() -try: - _stdin_fd = sys.stdin.fileno() -except Exception: - _stdin_fd = None - - -class MultiprocessingPdb(pdb.Pdb): - """A Pdb wrapper that works in a multiprocessing environment. - - Usage: `from fairseq import pdb; pdb.set_trace()` - """ - - def __init__(self): - pdb.Pdb.__init__(self, nosigint=True) - - def _cmdloop(self): - stdin_bak = sys.stdin - with _stdin_lock: - try: - if _stdin_fd is not None: - if not _stdin[0]: - _stdin[0] = os.fdopen(_stdin_fd) - sys.stdin = _stdin[0] - self.cmdloop() - finally: - sys.stdin = stdin_bak - - -def set_trace(): - pdb = MultiprocessingPdb() - pdb.set_trace(sys._getframe().f_back) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/bart/summarize.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/bart/summarize.py deleted file mode 100644 index 04435f80e39c2d9d894696dae7cba5b381e13da9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/bart/summarize.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch -from fairseq.models.bart import BARTModel -import argparse - -XSUM_KWARGS = dict(beam=6, lenpen=1.0, max_len_b=60, min_len=10, no_repeat_ngram_size=3) -CNN_KWARGS = dict(beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3) - - -@torch.no_grad() -def generate(bart, infile, outfile="bart_hypo.txt", bsz=32, n_obs=None, **eval_kwargs): - count = 1 - - # if n_obs is not None: bsz = min(bsz, n_obs) - - with open(infile) as source, open(outfile, "w") as fout: - sline = source.readline().strip() - slines = [sline] - for sline in source: - if n_obs is not None and count > n_obs: - break - if count % bsz == 0: - hypotheses_batch = bart.sample(slines, **eval_kwargs) - for hypothesis in hypotheses_batch: - fout.write(hypothesis + "\n") - fout.flush() - slines = [] - - slines.append(sline.strip()) - count += 1 - - if slines != []: - hypotheses_batch = bart.sample(slines, **eval_kwargs) - for hypothesis in hypotheses_batch: - fout.write(hypothesis + "\n") - fout.flush() - - -def main(): - """ - Usage:: - - python examples/bart/summarize.py \ - --model-dir $HOME/bart.large.cnn \ - --model-file model.pt \ - --src $HOME/data-bin/cnn_dm/test.source - """ - parser = argparse.ArgumentParser() - parser.add_argument( - "--model-dir", - required=True, - type=str, - default="bart.large.cnn/", - help="path containing model file and src_dict.txt", - ) - parser.add_argument( - "--model-file", - default="checkpoint_best.pt", - help="where in model_dir are weights saved", - ) - parser.add_argument( - "--src", default="test.source", help="text to summarize", type=str - ) - parser.add_argument( - "--out", default="test.hypo", help="where to save summaries", type=str - ) - parser.add_argument("--bsz", default=32, help="where to save summaries", type=int) - parser.add_argument( - "--n", default=None, help="how many examples to summarize", type=int - ) - parser.add_argument( - "--xsum-kwargs", - action="store_true", - default=False, - help="if true use XSUM_KWARGS else CNN_KWARGS", - ) - args = parser.parse_args() - eval_kwargs = XSUM_KWARGS if args.xsum_kwargs else CNN_KWARGS - if args.model_dir == "pytorch/fairseq": - bart = torch.hub.load("pytorch/fairseq", args.model_file) - else: - bart = BARTModel.from_pretrained( - args.model_dir, - checkpoint_file=args.model_file, - data_name_or_path=args.model_dir, - ) - bart = bart.eval() - if torch.cuda.is_available(): - bart = bart.cuda().half() - generate( - bart, args.src, bsz=args.bsz, n_obs=args.n, outfile=args.out, **eval_kwargs - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/data/collaters.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/data/collaters.py deleted file mode 100644 index 6acfec876b87e5a00bc92083b1181301a2a18e3f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/data/collaters.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" - This module contains collection of classes which implement - collate functionalities for various tasks. 
- - Collaters should know what data to expect for each sample - and they should pack / collate them into batches -""" - - -from __future__ import absolute_import, division, print_function, unicode_literals - -import numpy as np -import torch -from fairseq.data import data_utils as fairseq_data_utils - - -class Seq2SeqCollater(object): - """ - Implements collate function mainly for seq2seq tasks - This expects each sample to contain feature (src_tokens) and - targets. - This collator is also used for aligned training task. - """ - - def __init__( - self, - feature_index=0, - label_index=1, - pad_index=1, - eos_index=2, - move_eos_to_beginning=True, - ): - self.feature_index = feature_index - self.label_index = label_index - self.pad_index = pad_index - self.eos_index = eos_index - self.move_eos_to_beginning = move_eos_to_beginning - - def _collate_frames(self, frames): - """Convert a list of 2d frames into a padded 3d tensor - Args: - frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is - length of i-th frame and f_dim is static dimension of features - Returns: - 3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] - """ - len_max = max(frame.size(0) for frame in frames) - f_dim = frames[0].size(1) - res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0) - - for i, v in enumerate(frames): - res[i, : v.size(0)] = v - - return res - - def collate(self, samples): - """ - utility function to collate samples into batch for speech recognition. - """ - if len(samples) == 0: - return {} - - # parse samples into torch tensors - parsed_samples = [] - for s in samples: - # skip invalid samples - if s["data"][self.feature_index] is None: - continue - source = s["data"][self.feature_index] - if isinstance(source, (np.ndarray, np.generic)): - source = torch.from_numpy(source) - target = s["data"][self.label_index] - if isinstance(target, (np.ndarray, np.generic)): - target = torch.from_numpy(target).long() - elif isinstance(target, list): - target = torch.LongTensor(target) - - parsed_sample = {"id": s["id"], "source": source, "target": target} - parsed_samples.append(parsed_sample) - samples = parsed_samples - - id = torch.LongTensor([s["id"] for s in samples]) - frames = self._collate_frames([s["source"] for s in samples]) - # sort samples by descending number of frames - frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples]) - frames_lengths, sort_order = frames_lengths.sort(descending=True) - id = id.index_select(0, sort_order) - frames = frames.index_select(0, sort_order) - - target = None - target_lengths = None - prev_output_tokens = None - if samples[0].get("target", None) is not None: - ntokens = sum(len(s["target"]) for s in samples) - target = fairseq_data_utils.collate_tokens( - [s["target"] for s in samples], - self.pad_index, - self.eos_index, - left_pad=False, - move_eos_to_beginning=False, - ) - target = target.index_select(0, sort_order) - target_lengths = torch.LongTensor( - [s["target"].size(0) for s in samples] - ).index_select(0, sort_order) - prev_output_tokens = fairseq_data_utils.collate_tokens( - [s["target"] for s in samples], - self.pad_index, - self.eos_index, - left_pad=False, - move_eos_to_beginning=self.move_eos_to_beginning, - ) - prev_output_tokens = prev_output_tokens.index_select(0, sort_order) - else: - ntokens = sum(len(s["source"]) for s in samples) - - batch = { - "id": id, - "ntokens": ntokens, - "net_input": {"src_tokens": frames, "src_lengths": frames_lengths}, - "target": target, - "target_lengths": 
target_lengths, - "nsentences": len(samples), - } - if prev_output_tokens is not None: - batch["net_input"]["prev_output_tokens"] = prev_output_tokens - return batch diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py deleted file mode 100644 index f83471409a434556cab70086ca9e2d72d4bdddd5..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import sys - - -def main(): - for line in sys.stdin: - print(" ".join(list(line.strip().replace(" ", "|"))) + " |") - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/m2m_100/tokenizers/tokenize_zh.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/m2m_100/tokenizers/tokenize_zh.py deleted file mode 100644 index 674b5849cba829cf4f07a69369e9cc6eed376d4c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/m2m_100/tokenizers/tokenize_zh.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import fileinput - -import sacrebleu - - -for line in fileinput.input(): - print(sacrebleu.tokenize_zh(line)) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_synthesis/preprocessing/denoiser/utils.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_synthesis/preprocessing/denoiser/utils.py deleted file mode 100644 index 734d047f1bb8e3aa98c88e152eee7f91fea3d814..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_synthesis/preprocessing/denoiser/utils.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# author: adefossez - -import functools -import logging -from contextlib import contextmanager -import inspect -import time - -logger = logging.getLogger(__name__) - -EPS = 1e-8 - - -def capture_init(init): - """capture_init. - - Decorate `__init__` with this, and you can then - recover the *args and **kwargs passed to it in `self._init_args_kwargs` - """ - @functools.wraps(init) - def __init__(self, *args, **kwargs): - self._init_args_kwargs = (args, kwargs) - init(self, *args, **kwargs) - - return __init__ - - -def deserialize_model(package, strict=False): - """deserialize_model. 
- - """ - klass = package['class'] - if strict: - model = klass(*package['args'], **package['kwargs']) - else: - sig = inspect.signature(klass) - kw = package['kwargs'] - for key in list(kw): - if key not in sig.parameters: - logger.warning("Dropping inexistant parameter %s", key) - del kw[key] - model = klass(*package['args'], **kw) - model.load_state_dict(package['state']) - return model - - -def copy_state(state): - return {k: v.cpu().clone() for k, v in state.items()} - - -def serialize_model(model): - args, kwargs = model._init_args_kwargs - state = copy_state(model.state_dict()) - return {"class": model.__class__, "args": args, "kwargs": kwargs, "state": state} - - -@contextmanager -def swap_state(model, state): - """ - Context manager that swaps the state of a model, e.g: - - # model is in old state - with swap_state(model, new_state): - # model in new state - # model back to old state - """ - old_state = copy_state(model.state_dict()) - model.load_state_dict(state) - try: - yield - finally: - model.load_state_dict(old_state) - - -def pull_metric(history, name): - out = [] - for metrics in history: - if name in metrics: - out.append(metrics[name]) - return out - - -class LogProgress: - """ - Sort of like tqdm but using log lines and not as real time. - Args: - - logger: logger obtained from `logging.getLogger`, - - iterable: iterable object to wrap - - updates (int): number of lines that will be printed, e.g. - if `updates=5`, log every 1/5th of the total length. - - total (int): length of the iterable, in case it does not support - `len`. - - name (str): prefix to use in the log. - - level: logging level (like `logging.INFO`). - """ - def __init__(self, - logger, - iterable, - updates=5, - total=None, - name="LogProgress", - level=logging.INFO): - self.iterable = iterable - self.total = total or len(iterable) - self.updates = updates - self.name = name - self.logger = logger - self.level = level - - def update(self, **infos): - self._infos = infos - - def __iter__(self): - self._iterator = iter(self.iterable) - self._index = -1 - self._infos = {} - self._begin = time.time() - return self - - def __next__(self): - self._index += 1 - try: - value = next(self._iterator) - except StopIteration: - raise - else: - return value - finally: - log_every = max(1, self.total // self.updates) - # logging is delayed by 1 it, in order to have the metrics from update - if self._index >= 1 and self._index % log_every == 0: - self._log() - - def _log(self): - self._speed = (1 + self._index) / (time.time() - self._begin) - infos = " | ".join(f"{k.capitalize()} {v}" for k, v in self._infos.items()) - if self._speed < 1e-4: - speed = "oo sec/it" - elif self._speed < 0.1: - speed = f"{1/self._speed:.1f} sec/it" - else: - speed = f"{self._speed:.1f} it/sec" - out = f"{self.name} | {self._index}/{self.total} | {speed}" - if infos: - out += " | " + infos - self.logger.log(self.level, out) - - -def colorize(text, color): - """ - Display text with some ANSI color in the terminal. - """ - code = f"\033[{color}m" - restore = "\033[0m" - return "".join([code, text, restore]) - - -def bold(text): - """ - Display text in bold in the terminal. 
- """ - return colorize(text, "1") - - -def cal_snr(lbl, est): - import torch - y = 10.0 * torch.log10( - torch.sum(lbl**2, dim=-1) / (torch.sum((est-lbl)**2, dim=-1) + EPS) + - EPS - ) - return y diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/model.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/model.py deleted file mode 100644 index ccf132b150a7cc1c125c1190b5fd8f43edaae685..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/model.py +++ /dev/null @@ -1,669 +0,0 @@ -from math import sqrt -import torch -import torch.distributions as distr -from torch.autograd import Variable -from torch import nn -from torch.nn import functional as F -from .layers import ConvNorm, LinearNorm, GlobalAvgPool -from .utils import to_gpu, get_mask_from_lengths - - -class LocationLayer(nn.Module): - def __init__(self, attention_n_filters, attention_kernel_size, - attention_dim): - super(LocationLayer, self).__init__() - padding = int((attention_kernel_size - 1) / 2) - self.location_conv = ConvNorm(2, attention_n_filters, - kernel_size=attention_kernel_size, - padding=padding, bias=False, stride=1, - dilation=1) - self.location_dense = LinearNorm(attention_n_filters, attention_dim, - bias=False, w_init_gain='tanh') - - def forward(self, attention_weights_cat): - processed_attention = self.location_conv(attention_weights_cat) - processed_attention = processed_attention.transpose(1, 2) - processed_attention = self.location_dense(processed_attention) - return processed_attention - - -class Attention(nn.Module): - def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, - attention_location_n_filters, attention_location_kernel_size): - super(Attention, self).__init__() - self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, - bias=False, w_init_gain='tanh') - self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, - w_init_gain='tanh') - self.v = LinearNorm(attention_dim, 1, bias=False) - self.location_layer = LocationLayer(attention_location_n_filters, - attention_location_kernel_size, - attention_dim) - self.score_mask_value = -float("inf") - - def get_alignment_energies(self, query, processed_memory, - attention_weights_cat): - """ - PARAMS - ------ - query: decoder output (batch, n_mel_channels * n_frames_per_step) - processed_memory: processed encoder outputs (B, T_in, attention_dim) - attention_weights_cat: cumulative and prev. 
att weights (B, 2, max_time) - - RETURNS - ------- - alignment (batch, max_time) - """ - - processed_query = self.query_layer(query.unsqueeze(1)) - processed_attention_weights = self.location_layer(attention_weights_cat) - energies = self.v(torch.tanh( - processed_query + processed_attention_weights + processed_memory)) - - energies = energies.squeeze(-1) - return energies - - def forward(self, attention_hidden_state, memory, processed_memory, - attention_weights_cat, mask): - """ - PARAMS - ------ - attention_hidden_state: attention rnn last output - memory: encoder outputs - processed_memory: processed encoder outputs - attention_weights_cat: previous and cummulative attention weights - mask: binary mask for padded data - """ - alignment = self.get_alignment_energies( - attention_hidden_state, processed_memory, attention_weights_cat) - - if mask is not None: - alignment.data.masked_fill_(mask, self.score_mask_value) - - attention_weights = F.softmax(alignment, dim=1) - attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) - attention_context = attention_context.squeeze(1) - - return attention_context, attention_weights - - -class Prenet(nn.Module): - def __init__(self, in_dim, sizes): - super(Prenet, self).__init__() - in_sizes = [in_dim] + sizes[:-1] - self.layers = nn.ModuleList( - [LinearNorm(in_size, out_size, bias=False) - for (in_size, out_size) in zip(in_sizes, sizes)]) - - def forward(self, x): - for linear in self.layers: - x = F.dropout(F.relu(linear(x)), p=0.5, training=True) - return x - - -class Postnet(nn.Module): - """Postnet - - Five 1-d convolution with 512 channels and kernel size 5 - """ - - def __init__(self, hparams): - super(Postnet, self).__init__() - self.convolutions = nn.ModuleList() - - self.convolutions.append( - nn.Sequential( - ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim, - kernel_size=hparams.postnet_kernel_size, stride=1, - padding=int((hparams.postnet_kernel_size - 1) / 2), - dilation=1, w_init_gain='tanh'), - nn.BatchNorm1d(hparams.postnet_embedding_dim)) - ) - - for i in range(1, hparams.postnet_n_convolutions - 1): - self.convolutions.append( - nn.Sequential( - ConvNorm(hparams.postnet_embedding_dim, - hparams.postnet_embedding_dim, - kernel_size=hparams.postnet_kernel_size, stride=1, - padding=int((hparams.postnet_kernel_size - 1) / 2), - dilation=1, w_init_gain='tanh'), - nn.BatchNorm1d(hparams.postnet_embedding_dim)) - ) - - self.convolutions.append( - nn.Sequential( - ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels, - kernel_size=hparams.postnet_kernel_size, stride=1, - padding=int((hparams.postnet_kernel_size - 1) / 2), - dilation=1, w_init_gain='linear'), - nn.BatchNorm1d(hparams.n_mel_channels)) - ) - - def forward(self, x): - for i in range(len(self.convolutions) - 1): - x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training) - x = F.dropout(self.convolutions[-1](x), 0.5, self.training) - - return x - - -class Encoder(nn.Module): - """Encoder module: - - Three 1-d convolution banks - - Bidirectional LSTM - """ - def __init__(self, hparams): - super(Encoder, self).__init__() - - convolutions = [] - for _ in range(hparams.encoder_n_convolutions): - conv_layer = nn.Sequential( - ConvNorm(hparams.encoder_embedding_dim, - hparams.encoder_embedding_dim, - kernel_size=hparams.encoder_kernel_size, stride=1, - padding=int((hparams.encoder_kernel_size - 1) / 2), - dilation=1, w_init_gain='relu'), - nn.BatchNorm1d(hparams.encoder_embedding_dim)) - convolutions.append(conv_layer) - 
self.convolutions = nn.ModuleList(convolutions) - - self.lstm = nn.LSTM(hparams.encoder_embedding_dim, - int(hparams.encoder_embedding_dim / 2), 1, - batch_first=True, bidirectional=True) - - def forward(self, x, input_lengths): - for conv in self.convolutions: - x = F.dropout(F.relu(conv(x)), 0.5, self.training) - - x = x.transpose(1, 2) - - # pytorch tensor are not reversible, hence the conversion - input_lengths = input_lengths.cpu().numpy() - x = nn.utils.rnn.pack_padded_sequence( - x, input_lengths, batch_first=True) - - self.lstm.flatten_parameters() - outputs, _ = self.lstm(x) - - outputs, _ = nn.utils.rnn.pad_packed_sequence( - outputs, batch_first=True) - - return outputs - - def inference(self, x): - for conv in self.convolutions: - x = F.dropout(F.relu(conv(x)), 0.5, self.training) - - x = x.transpose(1, 2) - - self.lstm.flatten_parameters() - outputs, _ = self.lstm(x) - - return outputs - - -class AudioEncoder(nn.Module): - def __init__(self, hparams): - super(AudioEncoder, self).__init__() - - assert hparams.lat_dim > 0 - - convolutions = [] - inp_dim = hparams.n_mel_channels - for _ in range(hparams.lat_n_convolutions): - conv_layer = nn.Sequential( - ConvNorm(inp_dim, hparams.lat_n_filters, - kernel_size=hparams.lat_kernel_size, stride=1, - padding=int((hparams.lat_kernel_size - 1) / 2), - dilation=1, w_init_gain='tanh'), - nn.BatchNorm1d(hparams.lat_n_filters)) - inp_dim = hparams.lat_n_filters - convolutions.append(conv_layer) - self.convolutions = nn.ModuleList(convolutions) - - self.lstm = nn.LSTM(hparams.lat_n_filters, - int(hparams.lat_n_filters / 2), - hparams.lat_n_blstms, batch_first=True, - bidirectional=True) - self.pool = GlobalAvgPool() - - self.mu_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim) - self.logvar_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim) - self.lat_dim = hparams.lat_dim - - def forward(self, x, lengths): - """ - Args: - x (torch.Tensor): (B, F, T) - """ - - for conv in self.convolutions: - x = F.dropout(F.tanh(conv(x)), 0.5, self.training) - - x = x.transpose(1, 2) # (B, T, D) - - # x may not be sorted by length. 
Sort->process->unsort - max_len = x.size(1) - assert max_len == torch.max(lengths).item() - - lengths, perm_idx = lengths.sort(0, descending=True) - x = x[perm_idx] - x = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True) - - self.lstm.flatten_parameters() - outputs, _ = self.lstm(x) - outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True) - - _, unperm_idx = perm_idx.sort(0) - outputs = outputs[unperm_idx] # (B, T, D) - lengths = lengths[unperm_idx] # (B, T, D) - - outputs = self.pool(outputs, lengths) # (B, D) - - mu = self.mu_proj(outputs) - logvar = self.logvar_proj(outputs) - z = distr.Normal(mu, logvar).rsample() - return z, mu, logvar - - -class Decoder(nn.Module): - def __init__(self, hparams): - super(Decoder, self).__init__() - self.n_mel_channels = hparams.n_mel_channels - self.n_frames_per_step = hparams.n_frames_per_step - self.encoder_embedding_dim = hparams.encoder_embedding_dim - self.obs_dim = hparams.obs_dim - self.lat_dim = hparams.lat_dim - self.attention_rnn_dim = hparams.attention_rnn_dim - self.decoder_rnn_dim = hparams.decoder_rnn_dim - self.prenet_dim = hparams.prenet_dim - self.max_decoder_steps = hparams.max_decoder_steps - self.gate_threshold = hparams.gate_threshold - self.p_attention_dropout = hparams.p_attention_dropout - self.p_decoder_dropout = hparams.p_decoder_dropout - - self.prenet = Prenet( - hparams.n_mel_channels * hparams.n_frames_per_step, - [hparams.prenet_dim, hparams.prenet_dim]) - - self.attention_rnn = nn.LSTMCell( - hparams.prenet_dim + hparams.encoder_embedding_dim, - hparams.attention_rnn_dim) - - self.attention_layer = Attention( - hparams.attention_rnn_dim, hparams.encoder_embedding_dim, - hparams.attention_dim, hparams.attention_location_n_filters, - hparams.attention_location_kernel_size) - - encoder_tot_dim = (hparams.encoder_embedding_dim + \ - hparams.lat_dim + hparams.obs_dim) - self.decoder_rnn = nn.LSTMCell( - hparams.attention_rnn_dim + encoder_tot_dim, - hparams.decoder_rnn_dim, 1) - - self.linear_projection = LinearNorm( - hparams.decoder_rnn_dim + encoder_tot_dim, - hparams.n_mel_channels * hparams.n_frames_per_step) - - self.gate_layer = LinearNorm( - hparams.decoder_rnn_dim + encoder_tot_dim, 1, - bias=True, w_init_gain='sigmoid') - - def get_go_frame(self, memory): - """ Gets all zeros frames to use as first decoder input - PARAMS - ------ - memory: decoder outputs - - RETURNS - ------- - decoder_input: all zeros frames - """ - B = memory.size(0) - decoder_input = Variable(memory.data.new( - B, self.n_mel_channels * self.n_frames_per_step).zero_()) - return decoder_input - - def initialize_decoder_states(self, memory, obs_and_lat, mask): - """ Initializes attention rnn states, decoder rnn states, attention - weights, attention cumulative weights, attention context, stores memory - and stores processed memory - PARAMS - ------ - memory: Encoder outputs - obs_and_lat: Observed and latent attribute embeddings - mask: Mask for padded data if training, expects None for inference - """ - B = memory.size(0) - MAX_TIME = memory.size(1) - - self.attention_hidden = Variable(memory.data.new( - B, self.attention_rnn_dim).zero_()) - self.attention_cell = Variable(memory.data.new( - B, self.attention_rnn_dim).zero_()) - - self.decoder_hidden = Variable(memory.data.new( - B, self.decoder_rnn_dim).zero_()) - self.decoder_cell = Variable(memory.data.new( - B, self.decoder_rnn_dim).zero_()) - - self.attention_weights = Variable(memory.data.new( - B, MAX_TIME).zero_()) - self.attention_weights_cum = 
Variable(memory.data.new( - B, MAX_TIME).zero_()) - self.attention_context = Variable(memory.data.new( - B, self.encoder_embedding_dim).zero_()) - - self.memory = memory - self.processed_memory = self.attention_layer.memory_layer(memory) - self.obs_and_lat = obs_and_lat - self.mask = mask - - def parse_decoder_inputs(self, decoder_inputs): - """ Prepares decoder inputs, i.e. mel outputs - PARAMS - ------ - decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs - - RETURNS - ------- - inputs: processed decoder inputs - - """ - # (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels) - decoder_inputs = decoder_inputs.transpose(1, 2) - decoder_inputs = decoder_inputs.view( - decoder_inputs.size(0), - int(decoder_inputs.size(1)/self.n_frames_per_step), -1) - # (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels) - decoder_inputs = decoder_inputs.transpose(0, 1) - return decoder_inputs - - def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments): - """ Prepares decoder outputs for output - PARAMS - ------ - mel_outputs: - gate_outputs: gate output energies - alignments: - - RETURNS - ------- - mel_outputs: - gate_outpust: gate output energies - alignments: - """ - # (T_out, B) -> (B, T_out) - alignments = torch.stack(alignments).transpose(0, 1) - # (T_out, B) -> (B, T_out) - gate_outputs = torch.stack(gate_outputs).transpose(0, 1) - gate_outputs = gate_outputs.contiguous() - # (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels) - mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous() - # decouple frames per step - mel_outputs = mel_outputs.view( - mel_outputs.size(0), -1, self.n_mel_channels) - # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out) - mel_outputs = mel_outputs.transpose(1, 2) - - return mel_outputs, gate_outputs, alignments - - def decode(self, decoder_input): - """ Decoder step using stored states, attention and memory - PARAMS - ------ - decoder_input: previous mel output - - RETURNS - ------- - mel_output: - gate_output: gate output energies - attention_weights: - """ - cell_input = torch.cat((decoder_input, self.attention_context), -1) - self.attention_hidden, self.attention_cell = self.attention_rnn( - cell_input, (self.attention_hidden, self.attention_cell)) - self.attention_hidden = F.dropout( - self.attention_hidden, self.p_attention_dropout, self.training) - - attention_weights_cat = torch.cat( - (self.attention_weights.unsqueeze(1), - self.attention_weights_cum.unsqueeze(1)), dim=1) - self.attention_context, self.attention_weights = self.attention_layer( - self.attention_hidden, self.memory, self.processed_memory, - attention_weights_cat, self.mask) - - self.attention_weights_cum += self.attention_weights - decoder_input = torch.cat( - (self.attention_hidden, self.attention_context), -1) - if self.obs_and_lat is not None: - decoder_input = torch.cat((decoder_input, self.obs_and_lat), -1) - self.decoder_hidden, self.decoder_cell = self.decoder_rnn( - decoder_input, (self.decoder_hidden, self.decoder_cell)) - self.decoder_hidden = F.dropout( - self.decoder_hidden, self.p_decoder_dropout, self.training) - - decoder_hidden_attention_context = torch.cat( - (self.decoder_hidden, self.attention_context), dim=1) - if self.obs_and_lat is not None: - decoder_hidden_attention_context = torch.cat( - (decoder_hidden_attention_context, self.obs_and_lat), dim=1) - decoder_output = self.linear_projection( - decoder_hidden_attention_context) - - gate_prediction = self.gate_layer(decoder_hidden_attention_context) - return 
decoder_output, gate_prediction, self.attention_weights - - def forward(self, memory, obs_and_lat, decoder_inputs, memory_lengths): - """ Decoder forward pass for training - PARAMS - ------ - memory: Encoder outputs - obs_and_lat: Observed and latent attribute embeddings - decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs - memory_lengths: Encoder output lengths for attention masking. - - RETURNS - ------- - mel_outputs: mel outputs from the decoder - gate_outputs: gate outputs from the decoder - alignments: sequence of attention weights from the decoder - """ - - decoder_input = self.get_go_frame(memory).unsqueeze(0) - decoder_inputs = self.parse_decoder_inputs(decoder_inputs) - decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0) - decoder_inputs = self.prenet(decoder_inputs) - - self.initialize_decoder_states( - memory, obs_and_lat, mask=~get_mask_from_lengths(memory_lengths)) - - mel_outputs, gate_outputs, alignments = [], [], [] - while len(mel_outputs) < decoder_inputs.size(0) - 1: - decoder_input = decoder_inputs[len(mel_outputs)] - mel_output, gate_output, attention_weights = self.decode( - decoder_input) - mel_outputs += [mel_output.squeeze(1)] - gate_outputs += [gate_output.squeeze()] - alignments += [attention_weights] - - mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( - mel_outputs, gate_outputs, alignments) - - return mel_outputs, gate_outputs, alignments - - def inference(self, memory, obs_and_lat, ret_has_eos=False): - """ Decoder inference - PARAMS - ------ - memory: Encoder outputs - obs_and_lat: Observed and latent attribute embeddings - - RETURNS - ------- - mel_outputs: mel outputs from the decoder - gate_outputs: gate outputs from the decoder - alignments: sequence of attention weights from the decoder - """ - decoder_input = self.get_go_frame(memory) - - self.initialize_decoder_states(memory, obs_and_lat, mask=None) - - mel_outputs, gate_outputs, alignments = [], [], [] - has_eos = False - while True: - decoder_input = self.prenet(decoder_input) - mel_output, gate_output, alignment = self.decode(decoder_input) - - mel_outputs += [mel_output.squeeze(1)] - gate_outputs += [gate_output] - alignments += [alignment] - - if torch.sigmoid(gate_output.data) > self.gate_threshold: - has_eos = True - break - elif len(mel_outputs) == self.max_decoder_steps: - # print("Warning! 
Reached max decoder steps") - break - - decoder_input = mel_output - - mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( - mel_outputs, gate_outputs, alignments) - - if ret_has_eos: - return mel_outputs, gate_outputs, alignments, has_eos - else: - return mel_outputs, gate_outputs, alignments - - -class Tacotron2(nn.Module): - def __init__(self, hparams): - super(Tacotron2, self).__init__() - self.mask_padding = hparams.mask_padding - self.fp16_run = hparams.fp16_run - self.n_mel_channels = hparams.n_mel_channels - self.n_frames_per_step = hparams.n_frames_per_step - - # initialize text encoder embedding - self.embedding = nn.Embedding( - hparams.n_symbols, hparams.symbols_embedding_dim) - std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim)) - val = sqrt(3.0) * std # uniform bounds for std - self.embedding.weight.data.uniform_(-val, val) - - # initialize observed attribute embedding - self.obs_embedding = None - if hparams.obs_dim > 0: - self.obs_embedding = nn.Embedding( - hparams.obs_n_class, hparams.obs_dim) - std = sqrt(2.0 / (hparams.obs_n_class + hparams.obs_dim)) - val = sqrt(3.0) * std # uniform bounds for std - self.obs_embedding.weight.data.uniform_(-val, val) - - self.encoder = Encoder(hparams) - self.decoder = Decoder(hparams) - self.postnet = Postnet(hparams) - - self.lat_encoder = None - if hparams.lat_dim > 0: - self.lat_encoder = AudioEncoder(hparams) - - def parse_batch(self, batch): - (text_padded, input_lengths, obs_labels, - mel_padded, gate_padded, output_lengths) = batch - text_padded = to_gpu(text_padded).long() - input_lengths = to_gpu(input_lengths).long() - obs_labels = to_gpu(obs_labels).long() - max_len = torch.max(input_lengths.data).item() - mel_padded = to_gpu(mel_padded).float() - gate_padded = to_gpu(gate_padded).float() - output_lengths = to_gpu(output_lengths).long() - - return ( - (text_padded, input_lengths, obs_labels, - mel_padded, max_len, output_lengths), - (mel_padded, gate_padded)) - - def parse_output(self, outputs, output_lengths=None): - if self.mask_padding and output_lengths is not None: - mask = ~get_mask_from_lengths(output_lengths) - mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1)) - mask = mask.permute(1, 0, 2) - - outputs[0].data.masked_fill_(mask, 0.0) - outputs[1].data.masked_fill_(mask, 0.0) - outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies - - return outputs - - def forward(self, inputs): - (text_inputs, text_lengths, obs_labels, - mels, max_len, output_lengths) = inputs - text_lengths, output_lengths = text_lengths.data, output_lengths.data - - embedded_inputs = self.embedding(text_inputs).transpose(1, 2) - - encoder_outputs = self.encoder(embedded_inputs, text_lengths) - - obs = None - if self.obs_embedding is not None: - obs = self.obs_embedding(obs_labels) - - lat, lat_mu, lat_logvar = None, None, None - if self.lat_encoder is not None: - (lat, lat_mu, lat_logvar) = self.lat_encoder(mels, output_lengths) - - obs_and_lat = [x for x in [obs, lat] if x is not None] - if bool(obs_and_lat): - obs_and_lat = torch.cat(obs_and_lat, dim=-1) - else: - obs_and_lat = None - - mel_outputs, gate_outputs, alignments = self.decoder( - encoder_outputs, obs_and_lat, mels, memory_lengths=text_lengths) - - mel_outputs_postnet = self.postnet(mel_outputs) - mel_outputs_postnet = mel_outputs + mel_outputs_postnet - - return self.parse_output( - [mel_outputs, mel_outputs_postnet, gate_outputs, alignments, - lat_mu, lat_logvar], - output_lengths) - - def inference(self, inputs, 
obs_labels=None, lat=None, ret_has_eos=False): - embedded_inputs = self.embedding(inputs).transpose(1, 2) - encoder_outputs = self.encoder.inference(embedded_inputs) - - if obs_labels is None: - obs_labels = torch.LongTensor(len(inputs)) - obs_labels = obs_labels.to(inputs.device).zero_() - - obs = None - if self.obs_embedding is not None: - obs = self.obs_embedding(obs_labels) - - if self.lat_encoder is not None: - if lat is None: - lat = torch.FloatTensor(len(inputs), self.lat_encoder.lat_dim) - lat = lat.to(inputs.device).zero_().type(encoder_outputs.type()) - - obs_and_lat = [x for x in [obs, lat] if x is not None] - if bool(obs_and_lat): - obs_and_lat = torch.cat(obs_and_lat, dim=-1) - else: - obs_and_lat = None - - mel_outputs, gate_outputs, alignments, has_eos = self.decoder.inference( - encoder_outputs, obs_and_lat, ret_has_eos=True) - - mel_outputs_postnet = self.postnet(mel_outputs) - mel_outputs_postnet = mel_outputs + mel_outputs_postnet - - outputs = self.parse_output( - [mel_outputs, mel_outputs_postnet, gate_outputs, alignments]) - - if ret_has_eos: - return outputs + [has_eos] - else: - return outputs diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/unsupervised_quality_estimation/aggregate_scores.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/unsupervised_quality_estimation/aggregate_scores.py deleted file mode 100644 index 66d50d07ff2067b802b90a2aadd88df23153830a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/unsupervised_quality_estimation/aggregate_scores.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import sys - -import numpy as np - - -aggregate_funcs = { - "std": np.std, - "var": np.var, - "median": np.median, - "mean": np.mean, - "min": np.min, - "max": np.max, -} - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("-i", "--input_file", required=True, type=str) - parser.add_argument("-n", "--repeat_times", required=True, type=int) - parser.add_argument("-o", "--output_file", required=False) - parser.add_argument("-f", "--func", required=False, default="mean") - args = parser.parse_args() - - stream = open(args.output_file, "w") if args.output_file else sys.stdout - - segment_scores = [] - for line in open(args.input_file): - segment_scores.append(float(line.strip())) - if len(segment_scores) == args.repeat_times: - stream.write("{}\n".format(aggregate_funcs[args.func](segment_scores))) - segment_scores = [] - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/dynamic_loss_scaler.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/dynamic_loss_scaler.py deleted file mode 100644 index 43f9be37b9067c520cd794b9a941c57adae25e97..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/dynamic_loss_scaler.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -class DynamicLossScaler(object): - def __init__( - self, - init_scale=2.0 ** 15, - scale_factor=2.0, - scale_window=2000, - tolerance=0.0, - threshold=None, - min_loss_scale=1e-4, - ): - self.loss_scale = init_scale - self.scale_factor = scale_factor - self.scale_window = scale_window - self.tolerance = tolerance - self.threshold = threshold - self._iter = 0 - self._last_overflow_iter = -1 - self._last_rescale_iter = -1 - self._overflows_since_rescale = 0 - self.min_loss_scale = min_loss_scale - - def scale(self, outputs): - return self.loss_scale * outputs - - def update(self): - if (self._iter - self._last_overflow_iter) % self.scale_window == 0: - self.loss_scale *= self.scale_factor - self._last_rescale_iter = self._iter - self._iter += 1 - - def _decrease_loss_scale(self): - self.loss_scale /= self.scale_factor - if self.threshold is not None: - self.loss_scale = max(self.loss_scale, self.threshold) - - def check_overflow(self, grad_norm): - # detect inf and nan - if grad_norm == float("inf") or grad_norm != grad_norm: - # overflow has occured - prev_scale = self.loss_scale - iter_since_rescale = self._iter - self._last_rescale_iter - - self._last_overflow_iter = self._iter - self._overflows_since_rescale += 1 - pct_overflow = self._overflows_since_rescale / float(iter_since_rescale) - if pct_overflow >= self.tolerance: - self._decrease_loss_scale() - self._last_rescale_iter = self._iter - self._overflows_since_rescale = 0 - - if self.loss_scale <= self.min_loss_scale: - # Use FloatingPointError as an uncommon error that parent - # functions can safely catch to stop training. - self.loss_scale = prev_scale - raise FloatingPointError( - ( - "Minimum loss scale reached ({}). Your loss is probably exploding. " - "Try lowering the learning rate, using gradient clipping or " - "increasing the batch size." 
- ).format(self.min_loss_scale) - ) - - self._iter += 1 - raise OverflowError("setting loss scale to: " + str(self.loss_scale)) diff --git a/spaces/OIUGLK/bingo/src/lib/storage.ts b/spaces/OIUGLK/bingo/src/lib/storage.ts deleted file mode 100644 index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/lib/storage.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { getMany, set, del, clear } from 'idb-keyval'; - -export const Storage = { - async get(key: string | string[] | null): Promise { - if (key === null) return null; - if (typeof key === 'string') { - key = [key] - } - const returnData: Record = {} - const values = await getMany(key) - key.forEach((k, idx)=> { - returnData[k] = values[idx] - }) - return returnData; - }, - async set(object: any) { - for (let key of Object.keys(object)) { - await set(key, object[key]) - } - }, - async remove(key: string) { - return del(key); - }, - async clear() { - return clear(); - } -} diff --git a/spaces/OlaWod/FreeVC/speaker_encoder/audio.py b/spaces/OlaWod/FreeVC/speaker_encoder/audio.py deleted file mode 100644 index 2fcb77ad1d3a85f523e24f84691886736a5686cb..0000000000000000000000000000000000000000 --- a/spaces/OlaWod/FreeVC/speaker_encoder/audio.py +++ /dev/null @@ -1,107 +0,0 @@ -from scipy.ndimage.morphology import binary_dilation -from speaker_encoder.params_data import * -from pathlib import Path -from typing import Optional, Union -import numpy as np -import webrtcvad -import librosa -import struct - -int16_max = (2 ** 15) - 1 - - -def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray], - source_sr: Optional[int] = None): - """ - Applies the preprocessing operations used in training the Speaker Encoder to a waveform - either on disk or in memory. The waveform will be resampled to match the data hyperparameters. - - :param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not - just .wav), either the waveform as a numpy array of floats. - :param source_sr: if passing an audio waveform, the sampling rate of the waveform before - preprocessing. After preprocessing, the waveform's sampling rate will match the data - hyperparameters. If passing a filepath, the sampling rate will be automatically detected and - this argument will be ignored. - """ - # Load the wav from disk if needed - if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): - wav, source_sr = librosa.load(fpath_or_wav, sr=None) - else: - wav = fpath_or_wav - - # Resample the wav if needed - if source_sr is not None and source_sr != sampling_rate: - wav = librosa.resample(wav, source_sr, sampling_rate) - - # Apply the preprocessing: normalize volume and shorten long silences - wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True) - wav = trim_long_silences(wav) - - return wav - - -def wav_to_mel_spectrogram(wav): - """ - Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform. - Note: this not a log-mel spectrogram. - """ - frames = librosa.feature.melspectrogram( - y=wav, - sr=sampling_rate, - n_fft=int(sampling_rate * mel_window_length / 1000), - hop_length=int(sampling_rate * mel_window_step / 1000), - n_mels=mel_n_channels - ) - return frames.astype(np.float32).T - - -def trim_long_silences(wav): - """ - Ensures that segments without voice in the waveform remain no longer than a - threshold determined by the VAD parameters in params.py. 
- - :param wav: the raw waveform as a numpy array of floats - :return: the same waveform with silences trimmed away (length <= original wav length) - """ - # Compute the voice detection window size - samples_per_window = (vad_window_length * sampling_rate) // 1000 - - # Trim the end of the audio to have a multiple of the window size - wav = wav[:len(wav) - (len(wav) % samples_per_window)] - - # Convert the float waveform to 16-bit mono PCM - pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) - - # Perform voice activation detection - voice_flags = [] - vad = webrtcvad.Vad(mode=3) - for window_start in range(0, len(wav), samples_per_window): - window_end = window_start + samples_per_window - voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], - sample_rate=sampling_rate)) - voice_flags = np.array(voice_flags) - - # Smooth the voice detection with a moving average - def moving_average(array, width): - array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) - ret = np.cumsum(array_padded, dtype=float) - ret[width:] = ret[width:] - ret[:-width] - return ret[width - 1:] / width - - audio_mask = moving_average(voice_flags, vad_moving_average_width) - audio_mask = np.round(audio_mask).astype(np.bool) - - # Dilate the voiced regions - audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) - audio_mask = np.repeat(audio_mask, samples_per_window) - - return wav[audio_mask == True] - - -def normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False): - if increase_only and decrease_only: - raise ValueError("Both increase only and decrease only are set") - dBFS_change = target_dBFS - 10 * np.log10(np.mean(wav ** 2)) - if (dBFS_change < 0 and increase_only) or (dBFS_change > 0 and decrease_only): - return wav - return wav * (10 ** (dBFS_change / 20)) diff --git a/spaces/OpenDILabCommunity/DI-sheep/DI-sheep/ui/src/App.css b/spaces/OpenDILabCommunity/DI-sheep/DI-sheep/ui/src/App.css deleted file mode 100644 index e8cea4e7f87da1022b7e4ea3c4373e7a886c9e29..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/DI-sheep/DI-sheep/ui/src/App.css +++ /dev/null @@ -1,110 +0,0 @@ -#root { - text-align: center; - width: 100%; - max-width: 500px; - margin: 0 auto; -} - -.app { - width: 100%; - margin: 0 auto; -} - -.scene-container { - width: 100%; - padding-bottom: 100%; - position: relative; - margin: 10% 0; -} - -.scene-inner { - position: absolute; - left: 0; - right: 0; - bottom: 0; - top: 0; - overflow: visible; - font-size: 28px; -} - -.symbol { - width: 12.5%; - padding-bottom: 12.5%; - position: absolute; - transition: 150ms; - left: 0; - top: 0; - border-radius: 8px; -} - -.symbol-inner { - position: absolute; - left: 0; - right: 0; - bottom: 0; - top: 0; - display: flex; - justify-content: center; - align-items: center; - border-radius: 8px; - border: 2px solid #444; - transition: 0.3s; - overflow: hidden; - user-select: none; -} - -.symbol-inner img { - width: 100%; - height: 100%; - object-fit: contain; -} - -.queue-container { - border-radius: 8px; - width: 100%; - padding-bottom: 15%; - border: 2px solid gray; - margin-bottom: 16px; -} - -.flex-container { - display: flex; - gap: 8px; -} - -.flex-center { - justify-content: center; - align-items: center; -} - -.flex-grow { - flex-grow: 1; -} - -.flex-between { - justify-content: space-between; - align-items: center; -} - -.modal { - position: fixed; - width: 100vw; - height: 100vh; - display: flex; - 
flex-direction: column; - align-items: center; - justify-content: center; - backdrop-filter: blur(10px); - background-color: rgb(255 255 255 / 40%); - top: 0; - left: 0; -} - -.bgm-button { - position: fixed; - left: 0; - top: 0; - padding: 4px; - width: 36px; - height: 36px; -} diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py deleted file mode 100644 index 0f2464be744c083985898a25f9e71d00104f689d..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py +++ /dev/null @@ -1,151 +0,0 @@ -# An example config to train a mmdetection model using detectron2. - -from ..common.data.coco import dataloader -from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier -from ..common.optim import SGD as optimizer -from ..common.train import train - -from detectron2.modeling.mmdet_wrapper import MMDetDetector -from detectron2.config import LazyCall as L - -model = L(MMDetDetector)( - detector=dict( - type="MaskRCNN", - pretrained="torchvision://resnet50", - backbone=dict( - type="ResNet", - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type="BN", requires_grad=True), - norm_eval=True, - style="pytorch", - ), - neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), - rpn_head=dict( - type="RPNHead", - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type="AnchorGenerator", - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64], - ), - bbox_coder=dict( - type="DeltaXYWHBBoxCoder", - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0], - ), - loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type="L1Loss", loss_weight=1.0), - ), - roi_head=dict( - type="StandardRoIHead", - bbox_roi_extractor=dict( - type="SingleRoIExtractor", - roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - ), - bbox_head=dict( - type="Shared2FCBBoxHead", - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type="DeltaXYWHBBoxCoder", - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[0.1, 0.1, 0.2, 0.2], - ), - reg_class_agnostic=False, - loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type="L1Loss", loss_weight=1.0), - ), - mask_roi_extractor=dict( - type="SingleRoIExtractor", - roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - ), - mask_head=dict( - type="FCNMaskHead", - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0), - ), - ), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type="MaxIoUAssigner", - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1, - ), - sampler=dict( - type="RandomSampler", - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False, - ), - allowed_border=-1, - pos_weight=-1, - debug=False, - ), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type="nms", iou_threshold=0.7), - 
min_bbox_size=0, - ), - rcnn=dict( - assigner=dict( - type="MaxIoUAssigner", - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1, - ), - sampler=dict( - type="RandomSampler", - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True, - ), - mask_size=28, - pos_weight=-1, - debug=False, - ), - ), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type="nms", iou_threshold=0.7), - min_bbox_size=0, - ), - rcnn=dict( - score_thr=0.05, - nms=dict(type="nms", iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5, - ), - ), - ), - pixel_mean=[123.675, 116.280, 103.530], - pixel_std=[58.395, 57.120, 57.375], -) - -dataloader.train.mapper.image_format = "RGB" # torchvision pretrained model -train.init_checkpoint = None # pretrained model is loaded inside backbone diff --git a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.cpp b/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.cpp deleted file mode 100644 index 48757e2b0156b2c1513b615d2a17e5aee5172ae7..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -/*! -* Copyright (c) Facebook, Inc. and its affiliates. 
-* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR -*/ - -#include - -#include -#include - - -at::Tensor -ms_deform_attn_cpu_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step) -{ - AT_ERROR("Not implement on cpu"); -} - -std::vector -ms_deform_attn_cpu_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step) -{ - AT_ERROR("Not implement on cpu"); -} - diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp b/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp deleted file mode 100644 index 73928ece8150f847d98af65a95685a29fcceecde..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.cpp +++ /dev/null @@ -1,31 +0,0 @@ -#include -#include - -torch::Tensor upfirdn2d_op(const torch::Tensor &input, - const torch::Tensor &kernel, int up_x, int up_y, - int down_x, int down_y, int pad_x0, int pad_x1, - int pad_y0, int pad_y1); - -#define CHECK_CUDA(x) \ - TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) \ - TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -torch::Tensor upfirdn2d(const torch::Tensor &input, const torch::Tensor &kernel, - int up_x, int up_y, int down_x, int down_y, int pad_x0, - int pad_x1, int pad_y0, int pad_y1) { - CHECK_INPUT(input); - CHECK_INPUT(kernel); - - at::DeviceGuard guard(input.device()); - - return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, - pad_y0, pad_y1); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); -} \ No newline at end of file diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/transform.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/transform.go deleted file mode 100644 index 87bd2df9c139c4a9bde78a7ec84753b0de3a8068..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/transform.go and /dev/null differ diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/apc_head.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/apc_head.py deleted file mode 100644 index c7038bdbe0edf2a1f184b6899486d2d190dda076..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/apc_head.py +++ /dev/null @@ -1,158 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -class ACM(nn.Module): - """Adaptive Context Module used in APCNet. - - Args: - pool_scale (int): Pooling scale used in Adaptive Context - Module to extract region features. - fusion (bool): Add one conv to fuse residual feature. - in_channels (int): Input channels. - channels (int): Channels after modules, before conv_seg. 
- conv_cfg (dict | None): Config of conv layers. - norm_cfg (dict | None): Config of norm layers. - act_cfg (dict): Config of activation layers. - """ - - def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg, - norm_cfg, act_cfg): - super(ACM, self).__init__() - self.pool_scale = pool_scale - self.fusion = fusion - self.in_channels = in_channels - self.channels = channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.pooled_redu_conv = ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.input_redu_conv = ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.global_info = ConvModule( - self.channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0) - - self.residual_conv = ConvModule( - self.channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - if self.fusion: - self.fusion_conv = ConvModule( - self.channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, x): - """Forward function.""" - pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale) - # [batch_size, channels, h, w] - x = self.input_redu_conv(x) - # [batch_size, channels, pool_scale, pool_scale] - pooled_x = self.pooled_redu_conv(pooled_x) - batch_size = x.size(0) - # [batch_size, pool_scale * pool_scale, channels] - pooled_x = pooled_x.view(batch_size, self.channels, - -1).permute(0, 2, 1).contiguous() - # [batch_size, h * w, pool_scale * pool_scale] - affinity_matrix = self.gla(x + resize( - self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:]) - ).permute(0, 2, 3, 1).reshape( - batch_size, -1, self.pool_scale**2) - affinity_matrix = F.sigmoid(affinity_matrix) - # [batch_size, h * w, channels] - z_out = torch.matmul(affinity_matrix, pooled_x) - # [batch_size, channels, h * w] - z_out = z_out.permute(0, 2, 1).contiguous() - # [batch_size, channels, h, w] - z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3)) - z_out = self.residual_conv(z_out) - z_out = F.relu(z_out + x) - if self.fusion: - z_out = self.fusion_conv(z_out) - - return z_out - - -@HEADS.register_module() -class APCHead(BaseDecodeHead): - """Adaptive Pyramid Context Network for Semantic Segmentation. - - This head is the implementation of - `APCNet `_. - - Args: - pool_scales (tuple[int]): Pooling scales used in Adaptive Context - Module. Default: (1, 2, 3, 6). - fusion (bool): Add one conv to fuse residual feature. 
- """ - - def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs): - super(APCHead, self).__init__(**kwargs) - assert isinstance(pool_scales, (list, tuple)) - self.pool_scales = pool_scales - self.fusion = fusion - acm_modules = [] - for pool_scale in self.pool_scales: - acm_modules.append( - ACM(pool_scale, - self.fusion, - self.in_channels, - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - self.acm_modules = nn.ModuleList(acm_modules) - self.bottleneck = ConvModule( - self.in_channels + len(pool_scales) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - acm_outs = [x] - for acm_module in self.acm_modules: - acm_outs.append(acm_module(x)) - acm_outs = torch.cat(acm_outs, dim=1) - output = self.bottleneck(acm_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/aspp_head.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/aspp_head.py deleted file mode 100644 index aa914b5bb25124d1ff199553d96713d6a80484c0..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/aspp_head.py +++ /dev/null @@ -1,107 +0,0 @@ -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -class ASPPModule(nn.ModuleList): - """Atrous Spatial Pyramid Pooling (ASPP) Module. - - Args: - dilations (tuple[int]): Dilation rate of each layer. - in_channels (int): Input channels. - channels (int): Channels after modules, before conv_seg. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict): Config of activation layers. - """ - - def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg, - act_cfg): - super(ASPPModule, self).__init__() - self.dilations = dilations - self.in_channels = in_channels - self.channels = channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - for dilation in dilations: - self.append( - ConvModule( - self.in_channels, - self.channels, - 1 if dilation == 1 else 3, - dilation=dilation, - padding=0 if dilation == 1 else dilation, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - - def forward(self, x): - """Forward function.""" - aspp_outs = [] - for aspp_module in self: - aspp_outs.append(aspp_module(x)) - - return aspp_outs - - -@HEADS.register_module() -class ASPPHead(BaseDecodeHead): - """Rethinking Atrous Convolution for Semantic Image Segmentation. - - This head is the implementation of `DeepLabV3 - `_. - - Args: - dilations (tuple[int]): Dilation rates for ASPP module. - Default: (1, 6, 12, 18). 
- """ - - def __init__(self, dilations=(1, 6, 12, 18), **kwargs): - super(ASPPHead, self).__init__(**kwargs) - assert isinstance(dilations, (list, tuple)) - self.dilations = dilations - self.image_pool = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - self.aspp_modules = ASPPModule( - dilations, - self.in_channels, - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.bottleneck = ConvModule( - (len(dilations) + 1) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - aspp_outs = [ - resize( - self.image_pool(x), - size=x.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - ] - aspp_outs.extend(self.aspp_modules(x)) - aspp_outs = torch.cat(aspp_outs, dim=1) - output = self.bottleneck(aspp_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/PikeAndVine/resize_color/app.py b/spaces/PikeAndVine/resize_color/app.py deleted file mode 100644 index ea6fcf80afb30047403263ee5264abbd7878b303..0000000000000000000000000000000000000000 --- a/spaces/PikeAndVine/resize_color/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import gradio as gr -import cv2 -import numpy as np -from PIL import Image - -def hex_to_rgb(hex_code): - # Remove the '#' symbol if present - hex_code = hex_code.lstrip('#') - - # Convert the hex code to RGB - return tuple(int(hex_code[i:i+2], 16) for i in (0, 2, 4)) - -def resize_and_overlay_image(input_image, reduction_percentage, shift_pixels, shift_pixels_ud, background_color): - # Check if the input image is empty - if input_image.size == 0: - return None - - img = np.array(input_image) - - # Check if the image has shape information - if img.ndim < 2: - return None - - # Get the image dimensions - height, width = img.shape[:2] - - # Calculate the new dimensions based on the reduction percentage - new_height = int(height * reduction_percentage / 100) - new_width = int(width * reduction_percentage / 100) - - # Resize the image - resized_img = cv2.resize(img, (new_width, new_height)) - - # Convert the hex code to RGB - background_rgb = hex_to_rgb(background_color) - - # Create a background image with the original image dimensions and specified color - background_img = np.ones((height, width, 3), dtype=np.uint8) * background_rgb - - # Calculate the position to overlay the resized image on the background image - x = int((width - new_width) / 2) + int(shift_pixels) - y = int((height - new_height) / 2) + int(shift_pixels_ud) - - # Overlay the resized image on the background image - background_img[y:y + new_height, x:x + new_width] = resized_img - - # Return the resulting image as a NumPy array - return background_img - -# Create the Gradio interface -iface = gr.Interface( - fn=resize_and_overlay_image, - inputs=[ - gr.inputs.Image(type="pil", label="Input Image"), - gr.inputs.Slider(minimum=0, maximum=100, step=10, default=80, label="Percentage of Original"), - gr.inputs.Slider(minimum=-150, maximum=150, step=10, default=0, label="Shift Pixels Left / Right"), - gr.inputs.Slider(minimum=-150, maximum=250, step=10, default=0, label="Shift Pixels Up / Down"), - gr.inputs.Textbox(default="#ffffff", label="Background Color (Hex Code)") - ], - outputs=gr.outputs.Image(type="numpy", label="Result"), - title="Image Resizer", - 
description="Reduce the size of an image and overlay it on a colored background and shift it to the right." -) - -if __name__ == "__main__": - iface.launch() \ No newline at end of file diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/detector/generalized_vl_rcnn.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/detector/generalized_vl_rcnn.py deleted file mode 100644 index f7d19ead5dc8f02f7128c97d00da0e85f37aa19e..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/detector/generalized_vl_rcnn.py +++ /dev/null @@ -1,466 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -""" -Implements the Generalized VL R-CNN framework -""" - -import torch -from torch import nn -import torch.nn.functional as F - -from maskrcnn_benchmark.structures.image_list import to_image_list -from maskrcnn_benchmark.structures.bounding_box import BoxList -from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist - -from ..backbone import build_backbone -from ..rpn import build_rpn -from ..roi_heads import build_roi_heads - -from ..language_backbone import build_language_backbone -from transformers import AutoTokenizer - -import random -import timeit -import pdb -from copy import deepcopy - -def random_word(input_ids, mask_token_id, vocabs, padding_token_id, greenlight_map): - """ - greenlight_map, batch_size x 256 (seq_len): - 0 means this location cannot be calculated in the MLM loss - -1 means this location cannot be masked!! - 1 means this location can be masked and can be calculated in the MLM loss - """ - output_label = deepcopy(input_ids) - for j in range(input_ids.size(0)): - for i in range(input_ids.size(1)): - prob = random.random() - # mask token with probability - ratio = 0.15 - if greenlight_map is not None and greenlight_map[j,i] == -1: - output_label[j,i] = -100 - continue - - if (not input_ids[j,i] == padding_token_id) and prob < ratio: - prob /= ratio - - # 80% randomly change token to mask token - if prob < 0.8: - input_ids[j,i] = mask_token_id - - # 10% randomly change token to random token - elif prob < 0.9: - input_ids[j,i] = random.choice(vocabs) - - else: - # no masking token (will be ignored by loss function later) - output_label[j,i] = -100 - - if greenlight_map is not None and greenlight_map[j,i] != 1: - output_label[j,i] = -100 # If this location should not be masked - return input_ids, output_label - - -class GeneralizedVLRCNN(nn.Module): - """ - Main class for Generalized R-CNN. Currently supports boxes and masks. - It consists of three main parts: - - backbone - - rpn - - heads: takes the features + the proposals from the RPN and computes - detections / masks from it. 
- """ - - def __init__(self, cfg): - super(GeneralizedVLRCNN, self).__init__() - self.cfg = cfg - - # visual encoder - self.backbone = build_backbone(cfg) - - # language encoder - if cfg.MODEL.LANGUAGE_BACKBONE.TOKENIZER_TYPE == "clip": - # self.tokenizer = build_tokenizer("clip") - from transformers import CLIPTokenizerFast - if cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS: - print("Reuse token 'ðŁĴij' (token_id = 49404) for mask token!") - self.tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32", - from_slow=True, mask_token='ðŁĴij') - else: - self.tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32", - from_slow=True) - else: - self.tokenizer = AutoTokenizer.from_pretrained(cfg.MODEL.LANGUAGE_BACKBONE.TOKENIZER_TYPE) - self.tokenizer_vocab = self.tokenizer.get_vocab() - self.tokenizer_vocab_ids = [item for key, item in self.tokenizer_vocab.items()] - - self.language_backbone = build_language_backbone(cfg) - - self.rpn = build_rpn(cfg) - self.roi_heads = build_roi_heads(cfg) - self.DEBUG = cfg.MODEL.DEBUG - - self.freeze_backbone = cfg.MODEL.BACKBONE.FREEZE - self.freeze_fpn = cfg.MODEL.FPN.FREEZE - self.freeze_rpn = cfg.MODEL.RPN.FREEZE - self.add_linear_layer = cfg.MODEL.DYHEAD.FUSE_CONFIG.ADD_LINEAR_LAYER - - self.force_boxes = cfg.MODEL.RPN.FORCE_BOXES - - if cfg.MODEL.LINEAR_PROB: - assert cfg.MODEL.BACKBONE.FREEZE, "For linear probing, backbone should be frozen!" - if hasattr(self.backbone, 'fpn'): - assert cfg.MODEL.FPN.FREEZE, "For linear probing, FPN should be frozen!" - self.linear_prob = cfg.MODEL.LINEAR_PROB - self.freeze_cls_logits = cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS - if cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS: - # disable cls_logits - if hasattr(self.rpn.head, 'cls_logits'): - for p in self.rpn.head.cls_logits.parameters(): - p.requires_grad = False - - self.freeze_language_backbone = self.cfg.MODEL.LANGUAGE_BACKBONE.FREEZE - if self.cfg.MODEL.LANGUAGE_BACKBONE.FREEZE: - for p in self.language_backbone.parameters(): - p.requires_grad = False - - self.use_mlm_loss = cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS - self.mlm_loss_for_only_positives = cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS_FOR_ONLY_POSITIVES - - if self.cfg.GLIPKNOW.KNOWLEDGE_FILE: - from maskrcnn_benchmark.data.datasets.tsv import load_from_yaml_file - self.class_name_to_knowledge = load_from_yaml_file(self.cfg.GLIPKNOW.KNOWLEDGE_FILE) - self.class_name_list = sorted([k for k in self.class_name_to_knowledge]) - - def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(GeneralizedVLRCNN, self).train(mode) - if self.freeze_backbone: - self.backbone.body.eval() - for p in self.backbone.body.parameters(): - p.requires_grad = False - if self.freeze_fpn: - self.backbone.fpn.eval() - for p in self.backbone.fpn.parameters(): - p.requires_grad = False - if self.freeze_rpn: - if hasattr(self.rpn, 'head'): - self.rpn.head.eval() - for p in self.rpn.parameters(): - p.requires_grad = False - if self.linear_prob: - if self.rpn is not None: - for key, value in self.rpn.named_parameters(): - if not ('bbox_pred' in key or 'cls_logits' in key or 'centerness' in key or 'cosine_scale' in key or 'dot_product_projection_text' in key or 'head.log_scale' in key or 'head.bias_lang' in key or 'head.bias0' in key): - value.requires_grad = False - if self.roi_heads is not None: - for key, value in self.roi_heads.named_parameters(): - if not ('bbox_pred' in key or 'cls_logits' in key or 'centerness' in key or 
'cosine_scale' in key or 'dot_product_projection_text' in key or 'head.log_scale' in key or 'head.bias_lang' in key or 'head.bias0' in key): - value.requires_grad = False - if self.freeze_cls_logits: - if hasattr(self.rpn.head, 'cls_logits'): - self.rpn.head.cls_logits.eval() - for p in self.rpn.head.cls_logits.parameters(): - p.requires_grad = False - if self.add_linear_layer: - if self.rpn is not None: - for key, p in self.rpn.named_parameters(): - if 'tunable_linear' in key: - p.requires_grad = True - - if self.freeze_language_backbone: - self.language_backbone.eval() - for p in self.language_backbone.parameters(): - p.requires_grad = False - - def forward(self, - images, - targets=None, - captions=None, - positive_map=None, - greenlight_map=None): - """ - Arguments: - images (list[Tensor] or ImageList): images to be processed - targets (list[BoxList]): ground-truth boxes present in the image (optional) - - mask_black_list: batch x 256, indicates whether or not a certain token is maskable or not - - Returns: - result (list[BoxList] or dict[Tensor]): the output from the model. - During training, it returns a dict[Tensor] which contains the losses. - During testing, it returns list[BoxList] contains additional fields - like `scores`, `labels` and `mask` (for Mask R-CNN models). - - """ - if self.training and targets is None: - raise ValueError("In training mode, targets should be passed") - - images = to_image_list(images) - # batch_size = images.tensors.shape[0] - device = images.tensors.device - - - if self.cfg.GLIPKNOW.PARALLEL_LANGUAGE_INPUT: - language_dict_features, positive_map = self._forward_language_parallel( - captions=captions, targets=targets, device=device, - positive_map=positive_map) - else: - # language embedding - language_dict_features = {} - if captions is not None: - #print(captions[0]) - tokenized = self.tokenizer.batch_encode_plus(captions, - max_length=self.cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN, - padding='max_length' if self.cfg.MODEL.LANGUAGE_BACKBONE.PAD_MAX else "longest", - return_special_tokens_mask=True, - return_tensors='pt', - truncation=True).to(device) - if self.use_mlm_loss: - if not self.mlm_loss_for_only_positives: - greenlight_map = None - input_ids, mlm_labels = random_word( - input_ids=tokenized.input_ids, - mask_token_id=self.tokenizer.mask_token_id, - vocabs=self.tokenizer_vocab_ids, - padding_token_id=self.tokenizer.pad_token_id, - greenlight_map=greenlight_map) - else: - input_ids = tokenized.input_ids - mlm_labels = None - - - tokenizer_input = {"input_ids": input_ids, - "attention_mask": tokenized.attention_mask} - - if self.cfg.MODEL.LANGUAGE_BACKBONE.FREEZE: - with torch.no_grad(): - language_dict_features = self.language_backbone(tokenizer_input) - else: - language_dict_features = self.language_backbone(tokenizer_input) - - # ONE HOT - if self.cfg.DATASETS.ONE_HOT: - new_masks = torch.zeros_like(language_dict_features['masks'], - device=language_dict_features['masks'].device) - new_masks[:, :self.cfg.MODEL.DYHEAD.NUM_CLASSES] = 1 - language_dict_features['masks'] = new_masks - - # MASK ALL SPECIAL TOKENS - if self.cfg.MODEL.LANGUAGE_BACKBONE.MASK_SPECIAL: - language_dict_features["masks"] = 1 - tokenized.special_tokens_mask - - language_dict_features["mlm_labels"] = mlm_labels - - # visual embedding - swint_feature_c4 = None - if 'vl' in self.cfg.MODEL.SWINT.VERSION: - # the backbone only updates the "hidden" field in language_dict_features - inputs = {"img": images.tensors, "lang": language_dict_features} - visual_features, 
language_dict_features, swint_feature_c4 = self.backbone(inputs) - else: - visual_features = self.backbone(images.tensors) - - # rpn force boxes - if targets: - targets = [target.to(device) - for target in targets if target is not None] - - if self.force_boxes: - proposals = [] - for t in targets: - tb = t.copy_with_fields(["labels"]) - tb.add_field("scores", torch.ones(tb.bbox.shape[0], dtype=torch.bool, device=tb.bbox.device)) - proposals.append(tb) - if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES: - _, proposal_losses, fused_visual_features = self.rpn( - images, visual_features, targets, language_dict_features, - positive_map, captions, swint_feature_c4) - elif self.training: - null_loss = 0 - for key, param in self.rpn.named_parameters(): - null_loss += 0.0 * param.sum() - proposal_losses = {('rpn_null_loss', null_loss)} - else: - proposals, proposal_losses, fused_visual_features = self.rpn(images, visual_features, targets, language_dict_features, positive_map, - captions, swint_feature_c4) - if self.roi_heads: - if self.cfg.MODEL.ROI_MASK_HEAD.PREDICTOR.startswith("VL"): - if self.training: - # "Only support VL mask head right now!!" - assert len(targets) == 1 and len(targets[0]) == len(positive_map), "shape match assert for mask head!!" - # Not necessary but as a safe guard: - # use the binary 0/1 positive map to replace the normalized positive map - targets[0].add_field("positive_map", positive_map) - # TODO: make sure that this use of language_dict_features is correct!! Its content should be changed in self.rpn - if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES: - x, result, detector_losses = self.roi_heads( - fused_visual_features, proposals, targets, - language_dict_features=language_dict_features, - positive_map_label_to_token=positive_map if not self.training else None - ) - else: - x, result, detector_losses = self.roi_heads( - visual_features, proposals, targets, - language_dict_features=language_dict_features, - positive_map_label_to_token=positive_map if not self.training else None - ) - else: - # RPN-only models don't have roi_heads - x = visual_features - result = proposals - detector_losses = {} - - if self.training: - losses = {} - losses.update(detector_losses) - losses.update(proposal_losses) - return losses - - return result - - def _forward_language_parallel(self, captions=None, targets=None, - device=None, positive_map=None): - ktype = self.cfg.GLIPKNOW.KNOWLEDGE_TYPE - def _construct_captions_from_class_names(class_names): - captions = [] - for c in class_names: - try: - info = self.class_name_to_knowledge[c] - cap = info['clean_name'] - - # combine wiki and gpt3 knowledge - if self.cfg.GLIPKNOW.WIKI_AND_GPT3: - ktype = 'def_wiki' - know_seq = info[ktype] - - ktype = 'gpt3' - if ktype == 'gpt3' or type(info[ktype]) == list: - know_seq += ' '.join([seq for seq in info[ktype][:self.cfg.GLIPKNOW.GPT3_NUM] ]) - - cap += ': ' + know_seq - - # only one knoweldge source is used - else: - if ktype and ktype in info and info[ktype]: - if ktype == 'gpt3' or type(info[ktype]) == list: - know_seq = ' '.join([seq for seq in info[ktype][:self.cfg.GLIPKNOW.GPT3_NUM] ]) - else: - know_seq = info[ktype] - cap += ': ' + know_seq - except: - cap = c - print(f'cap {cap}, c {c}') - - - captions.append(cap) - return captions - - if self.training: - assert captions is None - assert targets is not None - - max_classes_per_batch = self.cfg.GLIPKNOW.MAX_NUM_CLASSES_PER_BATCH_TRAIN - if max_classes_per_batch >= len(self.class_name_list): - shuffled_class_names = self.class_name_list.copy() - 
random.shuffle(shuffled_class_names) - if max_classes_per_batch > len(shuffled_class_names): - shuffled_class_names.extend(shuffled_class_names[:max_classes_per_batch - -len(shuffled_class_names)]) - random.shuffle(shuffled_class_names) - else: - label_list = [] - label_to_idx = {} - for target_per_im in targets: - labels_per_im = target_per_im.get_field('label_names') - for label in labels_per_im: - if label not in label_to_idx: - label_to_idx[label] = len(label_list) - label_list.append(label) - - label_list = label_list[:max_classes_per_batch] - if len(label_list) < max_classes_per_batch: - all_neg_classes = [c for c in self.class_name_list if c not - in label_to_idx] - neg_label_list = random.sample(all_neg_classes, - max_classes_per_batch - len(label_list)) - label_list.extend(neg_label_list) - random.shuffle(label_list) - shuffled_class_names = label_list - - label_to_shuffled_idx = {l: i for i, l in - enumerate(shuffled_class_names)} - total_boxes = sum(len(t) for t in targets) - positive_map = torch.zeros((total_boxes, max_classes_per_batch+1), - device=device) - offset = 0 - for target_per_im in targets: - labels_per_im = target_per_im.get_field('label_names') - for label in labels_per_im: - j = label_to_shuffled_idx.get(label, -1) - if j >= 0: - positive_map[offset, j] = 1 - offset += 1 - captions = _construct_captions_from_class_names(shuffled_class_names) - captions.append('') # onobj at the end, onedet/modeling/rpn/loss.py:719 - batch_size = len(targets) - - else: - assert captions is not None - batch_size = 1 - assert len(captions) == 1 - class_names = captions[0] - max_classes_per_batch = len(class_names) - captions = _construct_captions_from_class_names(class_names) - captions.append('') # onobj at the end, onedet/modeling/rpn/loss.py:719 - - tokenized = self.tokenizer.batch_encode_plus(captions, - max_length=self.cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN, - padding="longest", - return_special_tokens_mask=True, - return_tensors='pt', - truncation=True).to(device) - assert not self.use_mlm_loss - tokenizer_input = {"input_ids": tokenized.input_ids, - "attention_mask": tokenized.attention_mask} - - if self.cfg.MODEL.LANGUAGE_BACKBONE.FREEZE: - with torch.no_grad(): - language_dict_features = self.language_backbone(tokenizer_input) - else: - language_dict_features = self.language_backbone(tokenizer_input) - - assert not self.cfg.DATASETS.ONE_HOT - assert not self.cfg.MODEL.LANGUAGE_BACKBONE.MASK_SPECIAL - - agg_type = self.cfg.GLIPKNOW.LAN_FEATURE_AGG_TYPE - agg_feats = language_dict_features['hidden'] - agg_emb = language_dict_features['embedded'] - if agg_type == 'first': - agg_feats = agg_feats[:, 0, :] - agg_emb = agg_emb[:, 0, :] - elif agg_type == 'mean': - attn_mask = language_dict_features['masks'] - seq_len = attn_mask.sum(-1).unsqueeze(-1).float() - agg_feats = agg_feats * attn_mask.unsqueeze(-1).float() - agg_feats = agg_feats.sum(1) / seq_len - agg_emb = agg_emb * attn_mask.unsqueeze(-1).float() - agg_emb = agg_emb.sum(1) / seq_len - else: - raise ValueError('not supported GLIPKNOW.LAN_FEATURE_AGG_TYPE: {}'.format(agg_type)) - - expanded_features = agg_feats.unsqueeze(0).repeat(batch_size, 1, 1) - expanded_embedding = agg_emb.unsqueeze(0).repeat(batch_size, 1, 1) - - lang_dict = {} - lang_dict["mlm_labels"] = None - lang_dict["aggregate"] = None - lang_dict["embedded"] = expanded_embedding - lang_dict['hidden'] = expanded_features - lang_dict["masks"] = torch.ones((batch_size, max_classes_per_batch+1), - device=device, dtype=language_dict_features['masks'].dtype) - 
# in GLIP setting, the token at the end of seqence is usually [PAD], and is masked out - # if [noobj] is not masked out, the loss sum is very big, as most - # anchors are matched to [noobj] - lang_dict["masks"][:,-1] = 0 - return lang_dict, positive_map - diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/models/test_musicgen.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/models/test_musicgen.py deleted file mode 100644 index 65618a9e2ef5bb382694b50b23dd50958d590d4e..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/models/test_musicgen.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestMusicGenModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0, extend_stride=2.) - return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - def test_generate_long(self): - mg = self.get_musicgen() - mg.max_duration = 3. - mg.set_generation_params(duration=4., extend_stride=2.) - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000 * 4] diff --git a/spaces/Raaniel/Keyword_demo/README.md b/spaces/Raaniel/Keyword_demo/README.md deleted file mode 100644 index e59d1b47f805dabe9802ee376402554b4cb7b997..0000000000000000000000000000000000000000 --- a/spaces/Raaniel/Keyword_demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Keyword Demo -emoji: 👀 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Rajagopal/ImageBind_zeroshot_demo2/models/imagebind_model.py b/spaces/Rajagopal/ImageBind_zeroshot_demo2/models/imagebind_model.py deleted file mode 100644 index 395aabf4dbd6f7624ed7e20e74b13d34d0725cfb..0000000000000000000000000000000000000000 --- a/spaces/Rajagopal/ImageBind_zeroshot_demo2/models/imagebind_model.py +++ /dev/null @@ -1,517 +0,0 @@ -#!/usr/bin/env python3 -# Portions Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -import os -import urllib -from functools import partial -from types import SimpleNamespace - -import torch -import torch.nn as nn - -from models.helpers import ( - EinOpsRearrange, - LearnableLogitScaling, - Normalize, - SelectElement, - SelectEOSAndProject, -) -from models.multimodal_preprocessors import ( - AudioPreprocessor, - IMUPreprocessor, - PadIm2Video, - PatchEmbedGeneric, - RGBDTPreprocessor, - SpatioTemporalPosEmbeddingHelper, - TextPreprocessor, - ThermalPreprocessor, -) - -from models.transformer import MultiheadAttention, SimpleTransformer - - -ModalityType = SimpleNamespace( - VISION="vision", - TEXT="text", - AUDIO="audio", - THERMAL="thermal", - DEPTH="depth", - IMU="imu", -) - - -class ImageBindModel(nn.Module): - def __init__( - self, - video_frames=2, - kernel_size=(2, 14, 14), - audio_kernel_size=16, - audio_stride=10, - out_embed_dim=768, - vision_embed_dim=1024, - vision_num_blocks=24, - vision_num_heads=16, - audio_embed_dim=768, - audio_num_blocks=12, - audio_num_heads=12, - audio_num_mel_bins=128, - audio_target_len=204, - audio_drop_path=0.1, - text_embed_dim=768, - text_num_blocks=12, - text_num_heads=12, - depth_embed_dim=384, - depth_kernel_size=16, - depth_num_blocks=12, - depth_num_heads=8, - depth_drop_path=0.0, - thermal_embed_dim=768, - thermal_kernel_size=16, - thermal_num_blocks=12, - thermal_num_heads=12, - thermal_drop_path=0.0, - imu_embed_dim=512, - imu_kernel_size=8, - imu_num_blocks=6, - imu_num_heads=8, - imu_drop_path=0.7, - ): - super().__init__() - - self.modality_preprocessors = self._create_modality_preprocessors( - video_frames, - vision_embed_dim, - kernel_size, - text_embed_dim, - audio_embed_dim, - audio_kernel_size, - audio_stride, - audio_num_mel_bins, - audio_target_len, - depth_embed_dim, - depth_kernel_size, - thermal_embed_dim, - thermal_kernel_size, - imu_embed_dim, - ) - - self.modality_trunks = self._create_modality_trunks( - vision_embed_dim, - vision_num_blocks, - vision_num_heads, - text_embed_dim, - text_num_blocks, - text_num_heads, - audio_embed_dim, - audio_num_blocks, - audio_num_heads, - audio_drop_path, - depth_embed_dim, - depth_num_blocks, - depth_num_heads, - depth_drop_path, - thermal_embed_dim, - thermal_num_blocks, - thermal_num_heads, - thermal_drop_path, - imu_embed_dim, - imu_num_blocks, - imu_num_heads, - imu_drop_path, - ) - - self.modality_heads = self._create_modality_heads( - out_embed_dim, - vision_embed_dim, - text_embed_dim, - audio_embed_dim, - depth_embed_dim, - thermal_embed_dim, - imu_embed_dim, - ) - - self.modality_postprocessors = self._create_modality_postprocessors( - out_embed_dim - ) - - def _create_modality_preprocessors( - self, - video_frames=2, - vision_embed_dim=1024, - kernel_size=(2, 14, 14), - text_embed_dim=768, - audio_embed_dim=768, - audio_kernel_size=16, - audio_stride=10, - audio_num_mel_bins=128, - audio_target_len=204, - depth_embed_dim=768, - depth_kernel_size=16, - thermal_embed_dim=768, - thermal_kernel_size=16, - imu_embed_dim=512, - ): - rgbt_stem = PatchEmbedGeneric( - proj_stem=[ - PadIm2Video(pad_type="repeat", ntimes=2), - nn.Conv3d( - in_channels=3, - kernel_size=kernel_size, - out_channels=vision_embed_dim, - stride=kernel_size, - bias=False, - ), - ] - ) - rgbt_preprocessor = RGBDTPreprocessor( - img_size=[3, video_frames, 224, 224], - num_cls_tokens=1, - pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), - rgbt_stem=rgbt_stem, - depth_stem=None, - ) - - text_preprocessor = TextPreprocessor( - context_length=77, - vocab_size=49408, - 
embed_dim=text_embed_dim, - causal_masking=True, - ) - - audio_stem = PatchEmbedGeneric( - proj_stem=[ - nn.Conv2d( - in_channels=1, - kernel_size=audio_kernel_size, - stride=audio_stride, - out_channels=audio_embed_dim, - bias=False, - ), - ], - norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim), - ) - audio_preprocessor = AudioPreprocessor( - img_size=[1, audio_num_mel_bins, audio_target_len], - num_cls_tokens=1, - pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), - audio_stem=audio_stem, - ) - - depth_stem = PatchEmbedGeneric( - [ - nn.Conv2d( - kernel_size=depth_kernel_size, - in_channels=1, - out_channels=depth_embed_dim, - stride=depth_kernel_size, - bias=False, - ), - ], - norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim), - ) - - depth_preprocessor = RGBDTPreprocessor( - img_size=[1, 224, 224], - num_cls_tokens=1, - pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), - rgbt_stem=None, - depth_stem=depth_stem, - ) - - thermal_stem = PatchEmbedGeneric( - [ - nn.Conv2d( - kernel_size=thermal_kernel_size, - in_channels=1, - out_channels=thermal_embed_dim, - stride=thermal_kernel_size, - bias=False, - ), - ], - norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim), - ) - thermal_preprocessor = ThermalPreprocessor( - img_size=[1, 224, 224], - num_cls_tokens=1, - pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), - thermal_stem=thermal_stem, - ) - - imu_stem = PatchEmbedGeneric( - [ - nn.Linear( - in_features=48, - out_features=imu_embed_dim, - bias=False, - ), - ], - norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim), - ) - - imu_preprocessor = IMUPreprocessor( - img_size=[6, 2000], - num_cls_tokens=1, - kernel_size=8, - embed_dim=imu_embed_dim, - pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), - imu_stem=imu_stem, - ) - - modality_preprocessors = { - ModalityType.VISION: rgbt_preprocessor, - ModalityType.TEXT: text_preprocessor, - ModalityType.AUDIO: audio_preprocessor, - ModalityType.DEPTH: depth_preprocessor, - ModalityType.THERMAL: thermal_preprocessor, - ModalityType.IMU: imu_preprocessor, - } - - return nn.ModuleDict(modality_preprocessors) - - def _create_modality_trunks( - self, - vision_embed_dim=1024, - vision_num_blocks=24, - vision_num_heads=16, - text_embed_dim=768, - text_num_blocks=12, - text_num_heads=12, - audio_embed_dim=768, - audio_num_blocks=12, - audio_num_heads=12, - audio_drop_path=0.0, - depth_embed_dim=768, - depth_num_blocks=12, - depth_num_heads=12, - depth_drop_path=0.0, - thermal_embed_dim=768, - thermal_num_blocks=12, - thermal_num_heads=12, - thermal_drop_path=0.0, - imu_embed_dim=512, - imu_num_blocks=6, - imu_num_heads=8, - imu_drop_path=0.7, - ): - def instantiate_trunk( - embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path - ): - return SimpleTransformer( - embed_dim=embed_dim, - num_blocks=num_blocks, - ffn_dropout_rate=0.0, - drop_path_rate=drop_path, - attn_target=partial( - MultiheadAttention, - embed_dim=embed_dim, - num_heads=num_heads, - bias=True, - add_bias_kv=add_bias_kv, - ), - pre_transformer_layer=nn.Sequential( - nn.LayerNorm(embed_dim, eps=1e-6) - if pre_transformer_ln - else nn.Identity(), - EinOpsRearrange("b l d -> l b d"), - ), - post_transformer_layer=EinOpsRearrange("l b d -> b l d"), - ) - - modality_trunks = {} - modality_trunks[ModalityType.VISION] = instantiate_trunk( - vision_embed_dim, - vision_num_blocks, - vision_num_heads, - pre_transformer_ln=True, - add_bias_kv=False, - drop_path=0.0, 
- ) - modality_trunks[ModalityType.TEXT] = instantiate_trunk( - text_embed_dim, - text_num_blocks, - text_num_heads, - pre_transformer_ln=False, - add_bias_kv=False, - drop_path=0.0, - ) - modality_trunks[ModalityType.AUDIO] = instantiate_trunk( - audio_embed_dim, - audio_num_blocks, - audio_num_heads, - pre_transformer_ln=False, - add_bias_kv=True, - drop_path=audio_drop_path, - ) - modality_trunks[ModalityType.DEPTH] = instantiate_trunk( - depth_embed_dim, - depth_num_blocks, - depth_num_heads, - pre_transformer_ln=False, - add_bias_kv=True, - drop_path=depth_drop_path, - ) - modality_trunks[ModalityType.THERMAL] = instantiate_trunk( - thermal_embed_dim, - thermal_num_blocks, - thermal_num_heads, - pre_transformer_ln=False, - add_bias_kv=True, - drop_path=thermal_drop_path, - ) - modality_trunks[ModalityType.IMU] = instantiate_trunk( - imu_embed_dim, - imu_num_blocks, - imu_num_heads, - pre_transformer_ln=False, - add_bias_kv=True, - drop_path=imu_drop_path, - ) - - return nn.ModuleDict(modality_trunks) - - def _create_modality_heads( - self, - out_embed_dim, - vision_embed_dim, - text_embed_dim, - audio_embed_dim, - depth_embed_dim, - thermal_embed_dim, - imu_embed_dim, - ): - modality_heads = {} - - modality_heads[ModalityType.VISION] = nn.Sequential( - nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6), - SelectElement(index=0), - nn.Linear(vision_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.TEXT] = SelectEOSAndProject( - proj=nn.Sequential( - nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6), - nn.Linear(text_embed_dim, out_embed_dim, bias=False), - ) - ) - - modality_heads[ModalityType.AUDIO] = nn.Sequential( - nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6), - SelectElement(index=0), - nn.Linear(audio_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.DEPTH] = nn.Sequential( - nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6), - SelectElement(index=0), - nn.Linear(depth_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.THERMAL] = nn.Sequential( - nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6), - SelectElement(index=0), - nn.Linear(thermal_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.IMU] = nn.Sequential( - nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6), - SelectElement(index=0), - nn.Dropout(p=0.5), - nn.Linear(imu_embed_dim, out_embed_dim, bias=False), - ) - - return nn.ModuleDict(modality_heads) - - def _create_modality_postprocessors(self, out_embed_dim): - modality_postprocessors = {} - - modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1) - modality_postprocessors[ModalityType.TEXT] = nn.Sequential( - Normalize(dim=-1), LearnableLogitScaling(learnable=True) - ) - modality_postprocessors[ModalityType.AUDIO] = nn.Sequential( - Normalize(dim=-1), - LearnableLogitScaling(logit_scale_init=20.0, learnable=False), - ) - modality_postprocessors[ModalityType.DEPTH] = nn.Sequential( - Normalize(dim=-1), - LearnableLogitScaling(logit_scale_init=5.0, learnable=False), - ) - modality_postprocessors[ModalityType.THERMAL] = nn.Sequential( - Normalize(dim=-1), - LearnableLogitScaling(logit_scale_init=10.0, learnable=False), - ) - modality_postprocessors[ModalityType.IMU] = nn.Sequential( - Normalize(dim=-1), - LearnableLogitScaling(logit_scale_init=5.0, learnable=False), - ) - - return nn.ModuleDict(modality_postprocessors) - - def forward(self, inputs): - outputs = {} - for modality_key, modality_value in inputs.items(): - 
reduce_list = ( - modality_value.ndim >= 5 - ) # Audio and Video inputs consist of multiple clips - if reduce_list: - B, S = modality_value.shape[:2] - modality_value = modality_value.reshape( - B * S, *modality_value.shape[2:] - ) - - if modality_value is not None: - modality_value = self.modality_preprocessors[modality_key]( - **{modality_key: modality_value} - ) - trunk_inputs = modality_value["trunk"] - head_inputs = modality_value["head"] - modality_value = self.modality_trunks[modality_key](**trunk_inputs) - modality_value = self.modality_heads[modality_key]( - modality_value, **head_inputs - ) - modality_value = self.modality_postprocessors[modality_key]( - modality_value - ) - - if reduce_list: - modality_value = modality_value.reshape(B, S, -1) - modality_value = modality_value.mean(dim=1) - - outputs[modality_key] = modality_value - - return outputs - - -def imagebind_huge(pretrained=False): - model = ImageBindModel( - vision_embed_dim=1280, - vision_num_blocks=32, - vision_num_heads=16, - text_embed_dim=1024, - text_num_blocks=24, - text_num_heads=16, - out_embed_dim=1024, - audio_drop_path=0.1, - imu_drop_path=0.7, - ) - - if pretrained: - if not os.path.exists(".checkpoints/imagebind_huge.pth"): - print( - "Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..." - ) - os.makedirs(".checkpoints", exist_ok=True) - torch.hub.download_url_to_file( - "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth", - ".checkpoints/imagebind_huge.pth", - progress=True, - ) - - model.load_state_dict(torch.load(".checkpoints/imagebind_huge.pth")) - - return model diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/main_parser.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/main_parser.py deleted file mode 100644 index 5ade356b9c2f3e375bf598635627870f248c0cc3..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/main_parser.py +++ /dev/null @@ -1,134 +0,0 @@ -"""A single place for constructing and exposing the main parser -""" - -import os -import subprocess -import sys -from typing import List, Optional, Tuple - -from pip._internal.build_env import get_runnable_pip -from pip._internal.cli import cmdoptions -from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter -from pip._internal.commands import commands_dict, get_similar_commands -from pip._internal.exceptions import CommandError -from pip._internal.utils.misc import get_pip_version, get_prog - -__all__ = ["create_main_parser", "parse_command"] - - -def create_main_parser() -> ConfigOptionParser: - """Creates and returns the main parser for pip's CLI""" - - parser = ConfigOptionParser( - usage="\n%prog [options]", - add_help_option=False, - formatter=UpdatingDefaultsHelpFormatter(), - name="global", - prog=get_prog(), - ) - parser.disable_interspersed_args() - - parser.version = get_pip_version() - - # add the general options - gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) - parser.add_option_group(gen_opts) - - # so the help formatter knows - parser.main = True # type: ignore - - # create command listing for description - description = [""] + [ - f"{name:27} {command_info.summary}" - for name, command_info in commands_dict.items() - ] - parser.description = "\n".join(description) - - return parser - - -def identify_python_interpreter(python: str) -> Optional[str]: - # If the named file exists, use it. 
- # If it's a directory, assume it's a virtual environment and - # look for the environment's Python executable. - if os.path.exists(python): - if os.path.isdir(python): - # bin/python for Unix, Scripts/python.exe for Windows - # Try both in case of odd cases like cygwin. - for exe in ("bin/python", "Scripts/python.exe"): - py = os.path.join(python, exe) - if os.path.exists(py): - return py - else: - return python - - # Could not find the interpreter specified - return None - - -def parse_command(args: List[str]) -> Tuple[str, List[str]]: - parser = create_main_parser() - - # Note: parser calls disable_interspersed_args(), so the result of this - # call is to split the initial args into the general options before the - # subcommand and everything else. - # For example: - # args: ['--timeout=5', 'install', '--user', 'INITools'] - # general_options: ['--timeout==5'] - # args_else: ['install', '--user', 'INITools'] - general_options, args_else = parser.parse_args(args) - - # --python - if general_options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ: - # Re-invoke pip using the specified Python interpreter - interpreter = identify_python_interpreter(general_options.python) - if interpreter is None: - raise CommandError( - f"Could not locate Python interpreter {general_options.python}" - ) - - pip_cmd = [ - interpreter, - get_runnable_pip(), - ] - pip_cmd.extend(args) - - # Set a flag so the child doesn't re-invoke itself, causing - # an infinite loop. - os.environ["_PIP_RUNNING_IN_SUBPROCESS"] = "1" - returncode = 0 - try: - proc = subprocess.run(pip_cmd) - returncode = proc.returncode - except (subprocess.SubprocessError, OSError) as exc: - raise CommandError(f"Failed to run pip under {interpreter}: {exc}") - sys.exit(returncode) - - # --version - if general_options.version: - sys.stdout.write(parser.version) - sys.stdout.write(os.linesep) - sys.exit() - - # pip || pip help -> print_help() - if not args_else or (args_else[0] == "help" and len(args_else) == 1): - parser.print_help() - sys.exit() - - # the subcommand name - cmd_name = args_else[0] - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = [f'unknown command "{cmd_name}"'] - if guess: - msg.append(f'maybe you meant "{guess}"') - - raise CommandError(" - ".join(msg)) - - # all the args without the subcommand - cmd_args = args[:] - cmd_args.remove(cmd_name) - - return cmd_name, cmd_args diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/tags.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/tags.py deleted file mode 100644 index 9a3d25a71c75c975291cf987001ecd6882d6417d..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/tags.py +++ /dev/null @@ -1,487 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import logging -import platform -import sys -import sysconfig -from importlib.machinery import EXTENSION_SUFFIXES -from typing import ( - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, - cast, -) - -from . import _manylinux, _musllinux - -logger = logging.getLogger(__name__) - -PythonVersion = Sequence[int] -MacVersion = Tuple[int, int] - -INTERPRETER_SHORT_NAMES: Dict[str, str] = { - "python": "py", # Generic. 
- "cpython": "cp", - "pypy": "pp", - "ironpython": "ip", - "jython": "jy", -} - - -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 - - -class Tag: - """ - A representation of the tag triple for a wheel. - - Instances are considered immutable and thus are hashable. Equality checking - is also supported. - """ - - __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] - - def __init__(self, interpreter: str, abi: str, platform: str) -> None: - self._interpreter = interpreter.lower() - self._abi = abi.lower() - self._platform = platform.lower() - # The __hash__ of every single element in a Set[Tag] will be evaluated each time - # that a set calls its `.disjoint()` method, which may be called hundreds of - # times when scanning a page of links for packages with tags matching that - # Set[Tag]. Pre-computing the value here produces significant speedups for - # downstream consumers. - self._hash = hash((self._interpreter, self._abi, self._platform)) - - @property - def interpreter(self) -> str: - return self._interpreter - - @property - def abi(self) -> str: - return self._abi - - @property - def platform(self) -> str: - return self._platform - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Tag): - return NotImplemented - - return ( - (self._hash == other._hash) # Short-circuit ASAP for perf reasons. - and (self._platform == other._platform) - and (self._abi == other._abi) - and (self._interpreter == other._interpreter) - ) - - def __hash__(self) -> int: - return self._hash - - def __str__(self) -> str: - return f"{self._interpreter}-{self._abi}-{self._platform}" - - def __repr__(self) -> str: - return f"<{self} @ {id(self)}>" - - -def parse_tag(tag: str) -> FrozenSet[Tag]: - """ - Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. - - Returning a set is required due to the possibility that the tag is a - compressed tag set. - """ - tags = set() - interpreters, abis, platforms = tag.split("-") - for interpreter in interpreters.split("."): - for abi in abis.split("."): - for platform_ in platforms.split("."): - tags.add(Tag(interpreter, abi, platform_)) - return frozenset(tags) - - -def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: - value = sysconfig.get_config_var(name) - if value is None and warn: - logger.debug( - "Config variable '%s' is unset, Python ABI tag may be incorrect", name - ) - return value - - -def _normalize_string(string: str) -> str: - return string.replace(".", "_").replace("-", "_") - - -def _abi3_applies(python_version: PythonVersion) -> bool: - """ - Determine if the Python version supports abi3. - - PEP 384 was first implemented in Python 3.2. - """ - return len(python_version) > 1 and tuple(python_version) >= (3, 2) - - -def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: - py_version = tuple(py_version) # To allow for version comparison. - abis = [] - version = _version_nodot(py_version[:2]) - debug = pymalloc = ucs4 = "" - with_debug = _get_config_var("Py_DEBUG", warn) - has_refcount = hasattr(sys, "gettotalrefcount") - # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled - # extension modules is the best option. 
- # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 - has_ext = "_d.pyd" in EXTENSION_SUFFIXES - if with_debug or (with_debug is None and (has_refcount or has_ext)): - debug = "d" - if py_version < (3, 8): - with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) - if with_pymalloc or with_pymalloc is None: - pymalloc = "m" - if py_version < (3, 3): - unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) - if unicode_size == 4 or ( - unicode_size is None and sys.maxunicode == 0x10FFFF - ): - ucs4 = "u" - elif debug: - # Debug builds can also load "normal" extension modules. - # We can also assume no UCS-4 or pymalloc requirement. - abis.append(f"cp{version}") - abis.insert( - 0, - "cp{version}{debug}{pymalloc}{ucs4}".format( - version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 - ), - ) - return abis - - -def cpython_tags( - python_version: Optional[PythonVersion] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a CPython interpreter. - - The tags consist of: - - cp-- - - cp-abi3- - - cp-none- - - cp-abi3- # Older Python versions down to 3.2. - - If python_version only specifies a major version then user-provided ABIs and - the 'none' ABItag will be used. - - If 'abi3' or 'none' are specified in 'abis' then they will be yielded at - their normal position and not at the beginning. - """ - if not python_version: - python_version = sys.version_info[:2] - - interpreter = f"cp{_version_nodot(python_version[:2])}" - - if abis is None: - if len(python_version) > 1: - abis = _cpython_abis(python_version, warn) - else: - abis = [] - abis = list(abis) - # 'abi3' and 'none' are explicitly handled later. - for explicit_abi in ("abi3", "none"): - try: - abis.remove(explicit_abi) - except ValueError: - pass - - platforms = list(platforms or platform_tags()) - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - if _abi3_applies(python_version): - yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) - yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) - - if _abi3_applies(python_version): - for minor_version in range(python_version[1] - 1, 1, -1): - for platform_ in platforms: - interpreter = "cp{version}".format( - version=_version_nodot((python_version[0], minor_version)) - ) - yield Tag(interpreter, "abi3", platform_) - - -def _generic_abi() -> Iterator[str]: - abi = sysconfig.get_config_var("SOABI") - if abi: - yield _normalize_string(abi) - - -def generic_tags( - interpreter: Optional[str] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a generic interpreter. - - The tags consist of: - - -- - - The "none" ABI will be added if it was not explicitly provided. - """ - if not interpreter: - interp_name = interpreter_name() - interp_version = interpreter_version(warn=warn) - interpreter = "".join([interp_name, interp_version]) - if abis is None: - abis = _generic_abi() - platforms = list(platforms or platform_tags()) - abis = list(abis) - if "none" not in abis: - abis.append("none") - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - - -def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: - """ - Yields Python versions in descending order. 
- - After the latest version, the major-only version will be yielded, and then - all previous versions of that major version. - """ - if len(py_version) > 1: - yield f"py{_version_nodot(py_version[:2])}" - yield f"py{py_version[0]}" - if len(py_version) > 1: - for minor in range(py_version[1] - 1, -1, -1): - yield f"py{_version_nodot((py_version[0], minor))}" - - -def compatible_tags( - python_version: Optional[PythonVersion] = None, - interpreter: Optional[str] = None, - platforms: Optional[Iterable[str]] = None, -) -> Iterator[Tag]: - """ - Yields the sequence of tags that are compatible with a specific version of Python. - - The tags consist of: - - py*-none- - - -none-any # ... if `interpreter` is provided. - - py*-none-any - """ - if not python_version: - python_version = sys.version_info[:2] - platforms = list(platforms or platform_tags()) - for version in _py_interpreter_range(python_version): - for platform_ in platforms: - yield Tag(version, "none", platform_) - if interpreter: - yield Tag(interpreter, "none", "any") - for version in _py_interpreter_range(python_version): - yield Tag(version, "none", "any") - - -def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: - if not is_32bit: - return arch - - if arch.startswith("ppc"): - return "ppc" - - return "i386" - - -def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: - formats = [cpu_arch] - if cpu_arch == "x86_64": - if version < (10, 4): - return [] - formats.extend(["intel", "fat64", "fat32"]) - - elif cpu_arch == "i386": - if version < (10, 4): - return [] - formats.extend(["intel", "fat32", "fat"]) - - elif cpu_arch == "ppc64": - # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? - if version > (10, 5) or version < (10, 4): - return [] - formats.append("fat64") - - elif cpu_arch == "ppc": - if version > (10, 6): - return [] - formats.extend(["fat32", "fat"]) - - if cpu_arch in {"arm64", "x86_64"}: - formats.append("universal2") - - if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: - formats.append("universal") - - return formats - - -def mac_platforms( - version: Optional[MacVersion] = None, arch: Optional[str] = None -) -> Iterator[str]: - """ - Yields the platform tags for a macOS system. - - The `version` parameter is a two-item tuple specifying the macOS version to - generate platform tags for. The `arch` parameter is the CPU architecture to - generate platform tags for. Both parameters default to the appropriate value - for the current system. - """ - version_str, _, cpu_arch = platform.mac_ver() - if version is None: - version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) - else: - version = version - if arch is None: - arch = _mac_arch(cpu_arch) - else: - arch = arch - - if (10, 0) <= version and version < (11, 0): - # Prior to Mac OS 11, each yearly release of Mac OS bumped the - # "minor" version number. The major version was always 10. - for minor_version in range(version[1], -1, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=10, minor=minor_version, binary_format=binary_format - ) - - if version >= (11, 0): - # Starting with Mac OS 11, each yearly release bumps the major version - # number. The minor versions are now the midyear updates. 
- for major_version in range(version[0], 10, -1): - compat_version = major_version, 0 - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=major_version, minor=0, binary_format=binary_format - ) - - if version >= (11, 0): - # Mac OS 11 on x86_64 is compatible with binaries from previous releases. - # Arm64 support was introduced in 11.0, so no Arm binaries from previous - # releases exist. - # - # However, the "universal2" binary format can have a - # macOS version earlier than 11.0 when the x86_64 part of the binary supports - # that version of macOS. - if arch == "x86_64": - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - else: - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_format = "universal2" - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - - -def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: - linux = _normalize_string(sysconfig.get_platform()) - if is_32bit: - if linux == "linux_x86_64": - linux = "linux_i686" - elif linux == "linux_aarch64": - linux = "linux_armv7l" - _, arch = linux.split("_", 1) - yield from _manylinux.platform_tags(linux, arch) - yield from _musllinux.platform_tags(arch) - yield linux - - -def _generic_platforms() -> Iterator[str]: - yield _normalize_string(sysconfig.get_platform()) - - -def platform_tags() -> Iterator[str]: - """ - Provides the platform tags for this installation. - """ - if platform.system() == "Darwin": - return mac_platforms() - elif platform.system() == "Linux": - return _linux_platforms() - else: - return _generic_platforms() - - -def interpreter_name() -> str: - """ - Returns the name of the running interpreter. - """ - name = sys.implementation.name - return INTERPRETER_SHORT_NAMES.get(name) or name - - -def interpreter_version(*, warn: bool = False) -> str: - """ - Returns the version of the running interpreter. - """ - version = _get_config_var("py_version_nodot", warn=warn) - if version: - version = str(version) - else: - version = _version_nodot(sys.version_info[:2]) - return version - - -def _version_nodot(version: PythonVersion) -> str: - return "".join(map(str, version)) - - -def sys_tags(*, warn: bool = False) -> Iterator[Tag]: - """ - Returns the sequence of tag triples for the running interpreter. - - The order of the sequence corresponds to priority order for the - interpreter, from most to least important. 
- """ - - interp_name = interpreter_name() - if interp_name == "cp": - yield from cpython_tags(warn=warn) - else: - yield from generic_tags() - - if interp_name == "pp": - yield from compatible_tags(interpreter="pp3") - else: - yield from compatible_tags() diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/dep_util.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/dep_util.py deleted file mode 100644 index db1fa01996ce0d47cd7f070c53b085926440d377..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/dep_util.py +++ /dev/null @@ -1,96 +0,0 @@ -"""distutils.dep_util - -Utility functions for simple, timestamp-based dependency of files -and groups of files; also, function based entirely on such -timestamp dependency analysis.""" - -import os -from distutils.errors import DistutilsFileError - - -def newer(source, target): - """Return true if 'source' exists and is more recently modified than - 'target', or if 'source' exists and 'target' doesn't. Return false if - both exist and 'target' is the same age or younger than 'source'. - Raise DistutilsFileError if 'source' does not exist. - """ - if not os.path.exists(source): - raise DistutilsFileError("file '%s' does not exist" % os.path.abspath(source)) - if not os.path.exists(target): - return 1 - - from stat import ST_MTIME - - mtime1 = os.stat(source)[ST_MTIME] - mtime2 = os.stat(target)[ST_MTIME] - - return mtime1 > mtime2 - - -# newer () - - -def newer_pairwise(sources, targets): - """Walk two filename lists in parallel, testing if each source is newer - than its corresponding target. Return a pair of lists (sources, - targets) where source is newer than target, according to the semantics - of 'newer()'. - """ - if len(sources) != len(targets): - raise ValueError("'sources' and 'targets' must be same length") - - # build a pair of lists (sources, targets) where source is newer - n_sources = [] - n_targets = [] - for i in range(len(sources)): - if newer(sources[i], targets[i]): - n_sources.append(sources[i]) - n_targets.append(targets[i]) - - return (n_sources, n_targets) - - -# newer_pairwise () - - -def newer_group(sources, target, missing='error'): - """Return true if 'target' is out-of-date with respect to any file - listed in 'sources'. In other words, if 'target' exists and is newer - than every file in 'sources', return false; otherwise return true. - 'missing' controls what we do when a source file is missing; the - default ("error") is to blow up with an OSError from inside 'stat()'; - if it is "ignore", we silently drop any missing source files; if it is - "newer", any missing source files make us assume that 'target' is - out-of-date (this is handy in "dry-run" mode: it'll make you pretend to - carry out commands that wouldn't work because inputs are missing, but - that doesn't matter because you're not actually going to run the - commands). - """ - # If the target doesn't even exist, then it's definitely out-of-date. - if not os.path.exists(target): - return 1 - - # Otherwise we have to find out the hard way: if *any* source file - # is more recent than 'target', then 'target' is out-of-date and - # we can immediately return true. If we fall through to the end - # of the loop, then 'target' is up-to-date and we return false. 
- from stat import ST_MTIME - - target_mtime = os.stat(target)[ST_MTIME] - for source in sources: - if not os.path.exists(source): - if missing == 'error': # blow up when we stat() the file - pass - elif missing == 'ignore': # missing source dropped from - continue # target's dependency list - elif missing == 'newer': # missing source means target is - return 1 # out-of-date - - source_mtime = os.stat(source)[ST_MTIME] - if source_mtime > target_mtime: - return 1 - else: - return 0 - - -# newer_group () diff --git a/spaces/Realcat/image-matching-webui/third_party/TopicFM/README.md b/spaces/Realcat/image-matching-webui/third_party/TopicFM/README.md deleted file mode 100644 index be60b38c8c265deeef5d7827d9fae4f65e842868..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/TopicFM/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Submodule used in [hloc](https://github.com/Vincentqyw/Hierarchical-Localization) toolbox - -# [AAAI-23] TopicFM: Robust and Interpretable Topic-Assisted Feature Matching - -Our method first inferred the latent topics (high-level context information) for each image and then use them to explicitly learn robust feature representation for the matching task. Please check out the details in [our paper](https://arxiv.org/abs/2207.00328) - -![Alt Text](demo/topicfm.gif) - -**Overall Architecture:** - -![Alt Text](demo/architecture_v4.png) - -## TODO List - -- [x] Release training and evaluation code on MegaDepth and ScanNet -- [x] Evaluation on HPatches, Aachen Day&Night, and InLoc -- [x] Evaluation for Image Matching Challenge - -## Requirements - -All experiments in this paper are implemented on the Ubuntu environment -with a NVIDIA driver of at least 430.64 and CUDA 10.1. - -First, create a virtual environment by anaconda as follows, - - conda create -n topicfm python=3.8 - conda activate topicfm - conda install pytorch==1.8.1 torchvision==0.9.1 cudatoolkit=10.1 -c pytorch - pip install -r requirements.txt - # using pip to install any missing packages - -## Data Preparation - -The proposed method is trained on the MegaDepth dataset and evaluated on the MegaDepth test, ScanNet, HPatches, Aachen Day and Night (v1.1), and InLoc dataset. -All these datasets are large, so we cannot include them in this code. -The following descriptions help download these datasets. - -### MegaDepth - -This dataset is used for both training and evaluation (Li and Snavely 2018). -To use this dataset with our code, please follow the [instruction of LoFTR](https://github.com/zju3dv/LoFTR/blob/master/docs/TRAINING.md) (Sun et al. 2021) - -### ScanNet -We only use 1500 image pairs of ScanNet (Dai et al. 2017) for evaluation. -Please download and prepare [test data](https://drive.google.com/drive/folders/1DOcOPZb3-5cWxLqn256AhwUVjBPifhuf) of ScanNet -provided by [LoFTR](https://github.com/zju3dv/LoFTR/blob/master/docs/TRAINING.md). - -## Training - -To train our model, we recommend to use GPUs card as much as possible, and each GPU should be at least 12GB. -In our settings, we train on 4 GPUs, each of which is 12GB. -Please setup your hardware environment in `scripts/reproduce_train/outdoor.sh`. -And then run this command to start training. 
- - bash scripts/reproduce_train/outdoor.sh - - We then provide the trained model in `pretrained/model_best.ckpt` -## Evaluation - -### MegaDepth (relative pose estimation) - - bash scripts/reproduce_test/outdoor.sh - -### ScanNet (relative pose estimation) - - bash scripts/reproduce_test/indoor.sh - -### HPatches, Aachen v1.1, InLoc - -To evaluate on these datasets, we integrate our code to the image-matching-toolbox provided by Zhou et al. (2021). -The updated code is available [here](https://github.com/TruongKhang/image-matching-toolbox). -After cloning this code, please follow instructions of image-matching-toolbox to install all required packages and prepare data for evaluation. - -Then, run these commands to perform evaluation: (note that all hyperparameter settings are in `configs/topicfm.yml`) - -**HPatches (homography estimation)** - - python -m immatch.eval_hpatches --gpu 0 --config 'topicfm' --task 'both' --h_solver 'cv' --ransac_thres 3 --root_dir . --odir 'outputs/hpatches' - -**Aachen Day-Night v1.1 (visual localization)** - - python -m immatch.eval_aachen --gpu 0 --config 'topicfm' --colmap --benchmark_name 'aachen_v1.1' - -**InLoc (visual localization)** - - python -m immatch.eval_inloc --gpu 0 --config 'topicfm' - -### Image Matching Challenge 2022 (IMC-2022) -IMC-2022 was held on [Kaggle](https://www.kaggle.com/competitions/image-matching-challenge-2022/overview). -Most high ranking methods were achieved by using an ensemble method which combines the matching results of -various state-of-the-art methods including LoFTR, SuperPoint+SuperGlue, MatchFormer, or QuadTree Attention. - -In this evaluation, we only submit the results produced by our method (TopicFM) alone. Please refer to [this notebook](https://www.kaggle.com/code/khangtg09121995/topicfm-eval). -This table compares our results with the other methods such as LoFTR (ref. [here](https://www.kaggle.com/code/mcwema/imc-2022-kornia-loftr-score-plateau-0-726)), -SP+SuperGlue (ref. [here](https://www.kaggle.com/code/yufei12/superglue-baseline)). - -| | Public Score | Private Score | -|----------------|--------------|---------------| -| SP + SuperGlue | 0.678 | 0.677 | -| LoFTR | 0.726 | 0.736 | -| TopicFM (ours) | **0.804** | **0.811** | - - -### Runtime comparison - -The runtime reported in the paper is measured by averaging runtime of 1500 image pairs of the ScanNet evaluation dataset. -The image size can be changed at `configs/data/scannet_test_1500.py` - - python visualization.py --method --dataset_name "scannet" --measure_time --no_viz - # note that method_name is in ["topicfm", "loftr"] - -To measure time for LoFTR, please download the LoFTR's code as follows: - - git submodule update --init - # download pretrained models - mkdir third_party/loftr/pretrained - gdown --id 1M-VD35-qdB5Iw-AtbDBCKC7hPolFW9UY -O third_party/loftr/pretrained/outdoor_ds.ckpt - -## Citations -If you find this work useful, please cite this: - - @article{giang2022topicfm, - title={TopicFM: Robust and Interpretable Topic-assisted Feature Matching}, - author={Giang, Khang Truong and Song, Soohwan and Jo, Sungho}, - journal={arXiv preprint arXiv:2207.00328}, - year={2022} - } - -## Acknowledgement -This code is built based on [LoFTR](https://github.com/zju3dv/LoFTR). We thank the authors for their useful source code. 
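## Appendix: from matches to relative pose (illustrative sketch)

The MegaDepth and ScanNet numbers above are relative-pose metrics: the matcher's correspondences are typically fed to a RANSAC-based essential-matrix solver and the recovered rotation/translation are compared against ground truth. The snippet below is a minimal, repo-agnostic sketch of that step using OpenCV; `mkpts0`/`mkpts1` (matched pixel coordinates as Nx2 arrays) and the intrinsics `K0`/`K1` are assumed inputs, not symbols from this codebase.

    import cv2
    import numpy as np

    def estimate_relative_pose(mkpts0, mkpts1, K0, K1, thresh_px=0.5):
        """Recover (R, t) from matched keypoints of an image pair (sketch)."""
        if len(mkpts0) < 5:
            return None
        # Work in normalized camera coordinates so one RANSAC threshold fits both views.
        pts0 = cv2.undistortPoints(mkpts0.astype(np.float64).reshape(-1, 1, 2), K0, None).reshape(-1, 2)
        pts1 = cv2.undistortPoints(mkpts1.astype(np.float64).reshape(-1, 1, 2), K1, None).reshape(-1, 2)
        # Convert the pixel threshold to normalized coordinates via the mean focal length.
        thresh = thresh_px / np.mean([K0[0, 0], K0[1, 1], K1[0, 0], K1[1, 1]])
        E, inliers = cv2.findEssentialMat(
            pts0, pts1, np.eye(3), method=cv2.RANSAC, prob=0.99999, threshold=thresh
        )
        if E is None:
            return None
        # recoverPose picks the physically valid (R, t) among the E decompositions.
        _, R, t, _ = cv2.recoverPose(E, pts0, pts1, np.eye(3), mask=inliers)
        return R, t.ravel()

The angular errors of `R` and `t` against the ground-truth pose are then typically summarized as the AUC of the pose error at 5/10/20 degrees, the protocol popularized by SuperGlue/LoFTR-style evaluations.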
diff --git a/spaces/RockmanYang/vocal_remover/lib/layers.py b/spaces/RockmanYang/vocal_remover/lib/layers.py deleted file mode 100644 index 7bc0b7cecbcfff9e1cec7687f03327e5aa8ce859..0000000000000000000000000000000000000000 --- a/spaces/RockmanYang/vocal_remover/lib/layers.py +++ /dev/null @@ -1,160 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from lib import spec_utils - - -class Conv2DBNActiv(nn.Module): - - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False - ), - nn.BatchNorm2d(nout), - activ() - ) - - def __call__(self, x): - return self.conv(x) - - -# class SeperableConv2DBNActiv(nn.Module): - -# def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): -# super(SeperableConv2DBNActiv, self).__init__() -# self.conv = nn.Sequential( -# nn.Conv2d( -# nin, nin, -# kernel_size=ksize, -# stride=stride, -# padding=pad, -# dilation=dilation, -# groups=nin, -# bias=False -# ), -# nn.Conv2d( -# nin, nout, -# kernel_size=1, -# bias=False -# ), -# nn.BatchNorm2d(nout), -# activ() -# ) - -# def __call__(self, x): -# return self.conv(x) - - -class Encoder(nn.Module): - - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - - def __call__(self, x): - h = self.conv1(x) - h = self.conv2(h) - - return h - - -class Decoder(nn.Module): - - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): - super(Decoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True) - - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - - h = self.conv1(x) - # h = self.conv2(h) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - - def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) - ) - self.conv2 = Conv2DBNActiv( - nin, nout, 1, 1, 0, activ=activ - ) - self.conv3 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = Conv2DBNActiv( - nout * 5, nout, 1, 1, 0, activ=activ - ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - out = self.bottleneck(out) - - if self.dropout is not None: - out = self.dropout(out) - - return out - - -class 
LSTMModule(nn.Module): - - def __init__(self, nin_conv, nin_lstm, nout_lstm): - super(LSTMModule, self).__init__() - self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) - self.lstm = nn.LSTM( - input_size=nin_lstm, - hidden_size=nout_lstm // 2, - bidirectional=True - ) - self.dense = nn.Sequential( - nn.Linear(nout_lstm, nin_lstm), - nn.BatchNorm1d(nin_lstm), - nn.ReLU() - ) - - def forward(self, x): - N, _, nbins, nframes = x.size() - h = self.conv(x)[:, 0] # N, nbins, nframes - h = h.permute(2, 0, 1) # nframes, N, nbins - h, _ = self.lstm(h) - h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins - h = h.reshape(nframes, N, 1, nbins) - h = h.permute(1, 2, 3, 0) - - return h diff --git a/spaces/Rominn/vits-uma-genshin-honkai/text/cleaners.py b/spaces/Rominn/vits-uma-genshin-honkai/text/cleaners.py deleted file mode 100644 index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000 --- a/spaces/Rominn/vits-uma-genshin-honkai/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if 
re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i str: - """Preprocesses the sentence string by normalizing. - - Args: - s (str): the sentence - - Returns: - string: normalized with default parames - """ - - steps = [remove_articles, white_space_fix, remove_punc, lower] - - return _normalize_text(inp, steps) - - -def exact_match(prediction: str, answer: str) -> int: - """Computes exact match for sentences. - - Args: - prediction (str): the predicted answer - answer (str): the gold answer - - Returns: - int: 1 for exact match, 0 for not - """ - return int(_normalize_text_default(prediction) == _normalize_text_default(answer)) - - -def f1(prediction: str, answer: str) -> float: - """Computes F1-score on token overlap for sentences. - - Args: - prediction (str): the predicted answer - answer (str): the gold answer - - Returns: - boolean: the f1 score - """ - pred_tokens = _normalize_text_default(prediction).split() - answer_tokens = _normalize_text_default(answer).split() - - if len(pred_tokens) == 0 or len(answer_tokens) == 0: - return int(pred_tokens == answer_tokens) - - common_tokens = set(pred_tokens) & set(answer_tokens) - - if len(common_tokens) == 0: - return 0 - - prec = len(common_tokens) / len(pred_tokens) - rec = len(common_tokens) / len(answer_tokens) - - return 2 * (prec * rec) / (prec + rec) - - -def evaluate(answer: Any, prediction: Any): - """Evaluates the model by computing F1-score and exact match of the best - predicted answer on a random sentence. 
- - Returns: - float: overall exact match - float: overall F1-score - """ - print(prediction, answer) - return exact_match(prediction, answer), f1(prediction, answer) diff --git a/spaces/ServerX/PorcoDiaz/infer/modules/train/extract/extract_f0_rmvpe.py b/spaces/ServerX/PorcoDiaz/infer/modules/train/extract/extract_f0_rmvpe.py deleted file mode 100644 index c6c90440d9e612b37c6d5a514786a6d0fffb19ba..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/infer/modules/train/extract/extract_f0_rmvpe.py +++ /dev/null @@ -1,141 +0,0 @@ -import os -import sys -import traceback - -import parselmouth - -now_dir = os.getcwd() -sys.path.append(now_dir) -import logging - -import numpy as np -import pyworld - -from infer.lib.audio import load_audio - -logging.getLogger("numba").setLevel(logging.WARNING) - -n_part = int(sys.argv[1]) -i_part = int(sys.argv[2]) -i_gpu = sys.argv[3] -os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu) -exp_dir = sys.argv[4] -is_half = sys.argv[5] -f = open("%s/extract_f0_feature.log" % exp_dir, "a+") - - -def printt(strr): - print(strr) - f.write("%s\n" % strr) - f.flush() - - -class FeatureInput(object): - def __init__(self, samplerate=16000, hop_size=160): - self.fs = samplerate - self.hop = hop_size - - self.f0_bin = 256 - self.f0_max = 1100.0 - self.f0_min = 50.0 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - - def compute_f0(self, path, f0_method): - x = load_audio(path, self.fs) - # p_len = x.shape[0] // self.hop - if f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from infer.lib.rmvpe import RMVPE - - print("Loading rmvpe model") - self.model_rmvpe = RMVPE( - "assets/rmvpe/rmvpe.pt", is_half=is_half, device="cuda" - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - return f0 - - def coarse_f0(self, f0): - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( - self.f0_bin - 2 - ) / (self.f0_mel_max - self.f0_mel_min) + 1 - - # use 0 or 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 - f0_coarse = np.rint(f0_mel).astype(int) - assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( - f0_coarse.max(), - f0_coarse.min(), - ) - return f0_coarse - - def go(self, paths, f0_method): - if len(paths) == 0: - printt("no-f0-todo") - else: - printt("todo-f0-%s" % len(paths)) - n = max(len(paths) // 5, 1) # 每个进程最多打印5条 - for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): - try: - if idx % n == 0: - printt("f0ing,now-%s,all-%s,-%s" % (idx, len(paths), inp_path)) - if ( - os.path.exists(opt_path1 + ".npy") == True - and os.path.exists(opt_path2 + ".npy") == True - ): - continue - featur_pit = self.compute_f0(inp_path, f0_method) - np.save( - opt_path2, - featur_pit, - allow_pickle=False, - ) # nsf - coarse_pit = self.coarse_f0(featur_pit) - np.save( - opt_path1, - coarse_pit, - allow_pickle=False, - ) # ori - except: - printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc())) - - -if __name__ == "__main__": - # exp_dir=r"E:\codes\py39\dataset\mi-test" - # n_p=16 - # f = open("%s/log_extract_f0.log"%exp_dir, "w") - printt(sys.argv) - featureInput = FeatureInput() - paths = [] - inp_root = "%s/1_16k_wavs" % (exp_dir) - opt_root1 = "%s/2a_f0" % (exp_dir) - opt_root2 = "%s/2b-f0nsf" % (exp_dir) - - os.makedirs(opt_root1, exist_ok=True) - os.makedirs(opt_root2, exist_ok=True) - for name in sorted(list(os.listdir(inp_root))): - inp_path = "%s/%s" % (inp_root, name) - if 
"spec" in inp_path: - continue - opt_path1 = "%s/%s" % (opt_root1, name) - opt_path2 = "%s/%s" % (opt_root2, name) - paths.append([inp_path, opt_path1, opt_path2]) - try: - featureInput.go(paths[i_part::n_part], "rmvpe") - except: - printt("f0_all_fail-%s" % (traceback.format_exc())) - # ps = [] - # for i in range(n_p): - # p = Process( - # target=featureInput.go, - # args=( - # paths[i::n_p], - # f0method, - # ), - # ) - # ps.append(p) - # p.start() - # for i in range(n_p): - # ps[i].join() diff --git a/spaces/Shredder/CONBERT-2/predict.py b/spaces/Shredder/CONBERT-2/predict.py deleted file mode 100644 index 8cbcb13a58a7515d7b33e1bc30be53ff92ec5acd..0000000000000000000000000000000000000000 --- a/spaces/Shredder/CONBERT-2/predict.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -import time -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler -from multiprocessing import cpu_count - -from transformers import ( - AutoConfig, - AutoModelForQuestionAnswering, - AutoTokenizer, - squad_convert_examples_to_features -) - -from transformers.data.processors.squad import SquadResult, SquadV2Processor, SquadExample -from transformers.data.metrics.squad_metrics import compute_predictions_logits - - -def run_prediction(question_texts, context_text, model_path, n_best_size=1): - max_seq_length = 512 - doc_stride = 256 - n_best_size = n_best_size - max_query_length = 64 - max_answer_length = 512 - do_lower_case = False - null_score_diff_threshold = 0.0 - - def to_list(tensor): - return tensor.detach().cpu().tolist() - - config_class, model_class, tokenizer_class = (AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer) - config = config_class.from_pretrained(model_path) - tokenizer = tokenizer_class.from_pretrained(model_path, do_lower_case=True, use_fast=False) - model = model_class.from_pretrained(model_path, config=config) - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - model.to(device) - - processor = SquadV2Processor() - examples = [] - - timer = time.time() - for i, question_text in enumerate(question_texts): - - example = SquadExample( - qas_id=str(i), - question_text=question_text, - context_text=context_text, - answer_text=None, - start_position_character=None, - title="Predict", - answers=None, - ) - - examples.append(example) - print(f'Created Squad Examples in {time.time()-timer} seconds') - - print(f'Number of CPUs: {cpu_count()}') - timer = time.time() - features, dataset = squad_convert_examples_to_features( - examples=examples, - tokenizer=tokenizer, - max_seq_length=max_seq_length, - doc_stride=doc_stride, - max_query_length=max_query_length, - is_training=False, - return_dataset="pt", - threads=cpu_count(), - ) - print(f'Converted Examples to Features in {time.time()-timer} seconds') - - eval_sampler = SequentialSampler(dataset) - eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=10) - - all_results = [] - - timer = time.time() - for batch in eval_dataloader: - model.eval() - batch = tuple(t.to(device) for t in batch) - - with torch.no_grad(): - inputs = { - "input_ids": batch[0], - "attention_mask": batch[1], - "token_type_ids": batch[2], - } - - example_indices = batch[3] - - outputs = model(**inputs) - - for i, example_index in enumerate(example_indices): - eval_feature = features[example_index.item()] - unique_id = int(eval_feature.unique_id) - - output = [to_list(output[i]) for output in outputs.to_tuple()] - - start_logits, end_logits = output - result = SquadResult(unique_id, start_logits, end_logits) - 
all_results.append(result) - print(f'Model predictions completed in {time.time()-timer} seconds') - - print(all_results) - - output_nbest_file = None - if n_best_size > 1: - output_nbest_file = "nbest.json" - - timer = time.time() - final_predictions = compute_predictions_logits( - all_examples=examples, - all_features=features, - all_results=all_results, - n_best_size=n_best_size, - max_answer_length=max_answer_length, - do_lower_case=do_lower_case, - output_prediction_file=None, - output_nbest_file=output_nbest_file, - output_null_log_odds_file=None, - verbose_logging=False, - version_2_with_negative=True, - null_score_diff_threshold=null_score_diff_threshold, - tokenizer=tokenizer - ) - print(f'Logits converted to predictions in {time.time()-timer} seconds') - - return final_predictions diff --git a/spaces/SmileyTatsu/Smile/README.md b/spaces/SmileyTatsu/Smile/README.md deleted file mode 100644 index 31d99623fd860cf2bfbaed092dee0b5ce85b8326..0000000000000000000000000000000000000000 --- a/spaces/SmileyTatsu/Smile/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Smile -emoji: 🐳 -colorFrom: blue -colorTo: red -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SpacesExamples/streamlit-docker-example/app.py b/spaces/SpacesExamples/streamlit-docker-example/app.py deleted file mode 100644 index fba86794650724fdb7e1c1e685f31b5e298b7763..0000000000000000000000000000000000000000 --- a/spaces/SpacesExamples/streamlit-docker-example/app.py +++ /dev/null @@ -1,42 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np - -st.title('Uber pickups in NYC') - -DATE_COLUMN = 'date/time' -DATA_URL = ('https://s3-us-west-2.amazonaws.com/' - 'streamlit-demo-data/uber-raw-data-sep14.csv.gz') - -@st.cache_resource -def load_data(nrows): - data = pd.read_csv(DATA_URL, nrows=nrows) - lowercase = lambda x: str(x).lower() - data.rename(lowercase, axis='columns', inplace=True) - data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN]) - return data - -data_load_state = st.text('Loading data...') -data = load_data(10000) -data_load_state.text("Done! (using st.cache)") - -if st.checkbox('Show raw data'): - st.subheader('Raw data') - st.write(data) - -st.subheader('Number of pickups by hour') -hist_values = np.histogram(data[DATE_COLUMN].dt.hour, bins=24, range=(0,24))[0] -st.bar_chart(hist_values) - -# Some number in the range 0-23 -hour_to_filter = st.slider('hour', 0, 23, 17) -filtered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter] - -st.subheader('Map of all pickups at %s:00' % hour_to_filter) -st.map(filtered_data) - -uploaded_file = st.file_uploader("Choose a file") -if uploaded_file is not None: - st.write(uploaded_file.name) - bytes_data = uploaded_file.getvalue() - st.write(len(bytes_data), "bytes") diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/page.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/page.py deleted file mode 100644 index d3e6a9eef50767e57cb21d770d7ed2c392288124..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/page.py +++ /dev/null @@ -1,348 +0,0 @@ -# encoding: utf-8 -""" -Paging capabilities for IPython.core - -Notes ------ - -For now this uses IPython hooks, so it can't be in IPython.utils. If we can get -rid of that dependency, we could move it there. ------ -""" - -# Copyright (c) IPython Development Team. 
-# Distributed under the terms of the Modified BSD License. - - -import os -import io -import re -import sys -import tempfile -import subprocess - -from io import UnsupportedOperation -from pathlib import Path - -from IPython import get_ipython -from IPython.display import display -from IPython.core.error import TryNext -from IPython.utils.data import chop -from IPython.utils.process import system -from IPython.utils.terminal import get_terminal_size -from IPython.utils import py3compat - - -def display_page(strng, start=0, screen_lines=25): - """Just display, no paging. screen_lines is ignored.""" - if isinstance(strng, dict): - data = strng - else: - if start: - strng = u'\n'.join(strng.splitlines()[start:]) - data = { 'text/plain': strng } - display(data, raw=True) - - -def as_hook(page_func): - """Wrap a pager func to strip the `self` arg - - so it can be called as a hook. - """ - return lambda self, *args, **kwargs: page_func(*args, **kwargs) - - -esc_re = re.compile(r"(\x1b[^m]+m)") - -def page_dumb(strng, start=0, screen_lines=25): - """Very dumb 'pager' in Python, for when nothing else works. - - Only moves forward, same interface as page(), except for pager_cmd and - mode. - """ - if isinstance(strng, dict): - strng = strng.get('text/plain', '') - out_ln = strng.splitlines()[start:] - screens = chop(out_ln,screen_lines-1) - if len(screens) == 1: - print(os.linesep.join(screens[0])) - else: - last_escape = "" - for scr in screens[0:-1]: - hunk = os.linesep.join(scr) - print(last_escape + hunk) - if not page_more(): - return - esc_list = esc_re.findall(hunk) - if len(esc_list) > 0: - last_escape = esc_list[-1] - print(last_escape + os.linesep.join(screens[-1])) - -def _detect_screen_size(screen_lines_def): - """Attempt to work out the number of lines on the screen. - - This is called by page(). It can raise an error (e.g. when run in the - test suite), so it's separated out so it can easily be called in a try block. - """ - TERM = os.environ.get('TERM',None) - if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'): - # curses causes problems on many terminals other than xterm, and - # some termios calls lock up on Sun OS5. - return screen_lines_def - - try: - import termios - import curses - except ImportError: - return screen_lines_def - - # There is a bug in curses, where *sometimes* it fails to properly - # initialize, and then after the endwin() call is made, the - # terminal is left in an unusable state. Rather than trying to - # check every time for this (by requesting and comparing termios - # flags each time), we just save the initial terminal state and - # unconditionally reset it every time. It's cheaper than making - # the checks. - try: - term_flags = termios.tcgetattr(sys.stdout) - except termios.error as err: - # can fail on Linux 2.6, pager_page will catch the TypeError - raise TypeError('termios error: {0}'.format(err)) from err - - try: - scr = curses.initscr() - except AttributeError: - # Curses on Solaris may not be complete, so we can't use it there - return screen_lines_def - - screen_lines_real,screen_cols = scr.getmaxyx() - curses.endwin() - - # Restore terminal state in case endwin() didn't. - termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags) - # Now we have what we needed: the screen size in rows/columns - return screen_lines_real - #print '***Screen size:',screen_lines_real,'lines x',\ - #screen_cols,'columns.' 
# dbg - -def pager_page(strng, start=0, screen_lines=0, pager_cmd=None): - """Display a string, piping through a pager after a certain length. - - strng can be a mime-bundle dict, supplying multiple representations, - keyed by mime-type. - - The screen_lines parameter specifies the number of *usable* lines of your - terminal screen (total lines minus lines you need to reserve to show other - information). - - If you set screen_lines to a number <=0, page() will try to auto-determine - your screen size and will only use up to (screen_size+screen_lines) for - printing, paging after that. That is, if you want auto-detection but need - to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for - auto-detection without any lines reserved simply use screen_lines = 0. - - If a string won't fit in the allowed lines, it is sent through the - specified pager command. If none given, look for PAGER in the environment, - and ultimately default to less. - - If no system pager works, the string is sent through a 'dumb pager' - written in python, very simplistic. - """ - - # for compatibility with mime-bundle form: - if isinstance(strng, dict): - strng = strng['text/plain'] - - # Ugly kludge, but calling curses.initscr() flat out crashes in emacs - TERM = os.environ.get('TERM','dumb') - if TERM in ['dumb','emacs'] and os.name != 'nt': - print(strng) - return - # chop off the topmost part of the string we don't want to see - str_lines = strng.splitlines()[start:] - str_toprint = os.linesep.join(str_lines) - num_newlines = len(str_lines) - len_str = len(str_toprint) - - # Dumb heuristics to guesstimate number of on-screen lines the string - # takes. Very basic, but good enough for docstrings in reasonable - # terminals. If someone later feels like refining it, it's not hard. - numlines = max(num_newlines,int(len_str/80)+1) - - screen_lines_def = get_terminal_size()[1] - - # auto-determine screen size - if screen_lines <= 0: - try: - screen_lines += _detect_screen_size(screen_lines_def) - except (TypeError, UnsupportedOperation): - print(str_toprint) - return - - #print 'numlines',numlines,'screenlines',screen_lines # dbg - if numlines <= screen_lines : - #print '*** normal print' # dbg - print(str_toprint) - else: - # Try to open pager and default to internal one if that fails. - # All failure modes are tagged as 'retval=1', to match the return - # value of a failed system command. If any intermediate attempt - # sets retval to 1, at the end we resort to our own page_dumb() pager. - pager_cmd = get_pager_cmd(pager_cmd) - pager_cmd += ' ' + get_pager_start(pager_cmd,start) - if os.name == 'nt': - if pager_cmd.startswith('type'): - # The default WinXP 'type' command is failing on complex strings. 
- retval = 1 - else: - fd, tmpname = tempfile.mkstemp('.txt') - tmppath = Path(tmpname) - try: - os.close(fd) - with tmppath.open("wt", encoding="utf-8") as tmpfile: - tmpfile.write(strng) - cmd = "%s < %s" % (pager_cmd, tmppath) - # tmpfile needs to be closed for windows - if os.system(cmd): - retval = 1 - else: - retval = None - finally: - Path.unlink(tmppath) - else: - try: - retval = None - # Emulate os.popen, but redirect stderr - proc = subprocess.Popen( - pager_cmd, - shell=True, - stdin=subprocess.PIPE, - stderr=subprocess.DEVNULL, - ) - pager = os._wrap_close( - io.TextIOWrapper(proc.stdin, encoding="utf-8"), proc - ) - try: - pager_encoding = pager.encoding or sys.stdout.encoding - pager.write(strng) - finally: - retval = pager.close() - except IOError as msg: # broken pipe when user quits - if msg.args == (32, 'Broken pipe'): - retval = None - else: - retval = 1 - except OSError: - # Other strange problems, sometimes seen in Win2k/cygwin - retval = 1 - if retval is not None: - page_dumb(strng,screen_lines=screen_lines) - - -def page(data, start=0, screen_lines=0, pager_cmd=None): - """Display content in a pager, piping through a pager after a certain length. - - data can be a mime-bundle dict, supplying multiple representations, - keyed by mime-type, or text. - - Pager is dispatched via the `show_in_pager` IPython hook. - If no hook is registered, `pager_page` will be used. - """ - # Some routines may auto-compute start offsets incorrectly and pass a - # negative value. Offset to 0 for robustness. - start = max(0, start) - - # first, try the hook - ip = get_ipython() - if ip: - try: - ip.hooks.show_in_pager(data, start=start, screen_lines=screen_lines) - return - except TryNext: - pass - - # fallback on default pager - return pager_page(data, start, screen_lines, pager_cmd) - - -def page_file(fname, start=0, pager_cmd=None): - """Page a file, using an optional pager command and starting line. - """ - - pager_cmd = get_pager_cmd(pager_cmd) - pager_cmd += ' ' + get_pager_start(pager_cmd,start) - - try: - if os.environ['TERM'] in ['emacs','dumb']: - raise EnvironmentError - system(pager_cmd + ' ' + fname) - except: - try: - if start > 0: - start -= 1 - page(open(fname, encoding="utf-8").read(), start) - except: - print('Unable to show file',repr(fname)) - - -def get_pager_cmd(pager_cmd=None): - """Return a pager command. - - Makes some attempts at finding an OS-correct one. - """ - if os.name == 'posix': - default_pager_cmd = 'less -R' # -R for color control sequences - elif os.name in ['nt','dos']: - default_pager_cmd = 'type' - - if pager_cmd is None: - try: - pager_cmd = os.environ['PAGER'] - except: - pager_cmd = default_pager_cmd - - if pager_cmd == 'less' and '-r' not in os.environ.get('LESS', '').lower(): - pager_cmd += ' -R' - - return pager_cmd - - -def get_pager_start(pager, start): - """Return the string for paging files with an offset. - - This is the '+N' argument which less and more (under Unix) accept. 
- """ - - if pager in ['less','more']: - if start: - start_string = '+' + str(start) - else: - start_string = '' - else: - start_string = '' - return start_string - - -# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch() -if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs': - import msvcrt - def page_more(): - """ Smart pausing between pages - - @return: True if need print more lines, False if quit - """ - sys.stdout.write('---Return to continue, q to quit--- ') - ans = msvcrt.getwch() - if ans in ("q", "Q"): - result = False - else: - result = True - sys.stdout.write("\b"*37 + " "*37 + "\b"*37) - return result -else: - def page_more(): - ans = py3compat.input('---Return to continue, q to quit--- ') - if ans.lower().startswith('q'): - return False - else: - return True diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/utils/data.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/utils/data.py deleted file mode 100644 index 28e66bfab5764fe58e19fb339b2cdf8ad9d510b4..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/altair/utils/data.py +++ /dev/null @@ -1,299 +0,0 @@ -import json -import os -import random -import hashlib -import warnings - -import pandas as pd -from toolz import curried -from typing import Callable - -from .core import sanitize_dataframe -from .core import sanitize_geo_interface -from .deprecation import AltairDeprecationWarning -from .plugin_registry import PluginRegistry - - -# ============================================================================== -# Data transformer registry -# ============================================================================== -DataTransformerType = Callable - - -class DataTransformerRegistry(PluginRegistry[DataTransformerType]): - _global_settings = {"consolidate_datasets": True} - - @property - def consolidate_datasets(self): - return self._global_settings["consolidate_datasets"] - - @consolidate_datasets.setter - def consolidate_datasets(self, value): - self._global_settings["consolidate_datasets"] = value - - -# ============================================================================== -# Data model transformers -# -# A data model transformer is a pure function that takes a dict or DataFrame -# and returns a transformed version of a dict or DataFrame. The dict objects -# will be the Data portion of the VegaLite schema. The idea is that user can -# pipe a sequence of these data transformers together to prepare the data before -# it hits the renderer. -# -# In this version of Altair, renderers only deal with the dict form of a -# VegaLite spec, after the Data model has been put into a schema compliant -# form. -# -# A data model transformer has the following type signature: -# DataModelType = Union[dict, pd.DataFrame] -# DataModelTransformerType = Callable[[DataModelType, KwArgs], DataModelType] -# ============================================================================== - - -class MaxRowsError(Exception): - """Raised when a data model has too many rows.""" - - pass - - -@curried.curry -def limit_rows(data, max_rows=5000): - """Raise MaxRowsError if the data model has more than max_rows. - - If max_rows is None, then do not perform any check. 
- """ - check_data_type(data) - if hasattr(data, "__geo_interface__"): - if data.__geo_interface__["type"] == "FeatureCollection": - values = data.__geo_interface__["features"] - else: - values = data.__geo_interface__ - elif isinstance(data, pd.DataFrame): - values = data - elif isinstance(data, dict): - if "values" in data: - values = data["values"] - else: - return data - elif hasattr(data, "__dataframe__"): - values = data - if max_rows is not None and len(values) > max_rows: - raise MaxRowsError( - "The number of rows in your dataset is greater " - f"than the maximum allowed ({max_rows}).\n\n" - "See https://altair-viz.github.io/user_guide/large_datasets.html " - "for information on how to plot large datasets, " - "including how to install third-party data management tools and, " - "in the right circumstance, disable the restriction" - ) - return data - - -@curried.curry -def sample(data, n=None, frac=None): - """Reduce the size of the data model by sampling without replacement.""" - check_data_type(data) - if isinstance(data, pd.DataFrame): - return data.sample(n=n, frac=frac) - elif isinstance(data, dict): - if "values" in data: - values = data["values"] - n = n if n else int(frac * len(values)) - values = random.sample(values, n) - return {"values": values} - elif hasattr(data, "__dataframe__"): - # experimental interchange dataframe support - pi = import_pyarrow_interchange() - pa_table = pi.from_dataframe(data) - n = n if n else int(frac * len(pa_table)) - indices = random.sample(range(len(pa_table)), n) - return pa_table.take(indices) - - -@curried.curry -def to_json( - data, - prefix="altair-data", - extension="json", - filename="{prefix}-{hash}.{extension}", - urlpath="", -): - """ - Write the data model to a .json file and return a url based data model. 
- """ - data_json = _data_to_json_string(data) - data_hash = _compute_data_hash(data_json) - filename = filename.format(prefix=prefix, hash=data_hash, extension=extension) - with open(filename, "w") as f: - f.write(data_json) - return {"url": os.path.join(urlpath, filename), "format": {"type": "json"}} - - -@curried.curry -def to_csv( - data, - prefix="altair-data", - extension="csv", - filename="{prefix}-{hash}.{extension}", - urlpath="", -): - """Write the data model to a .csv file and return a url based data model.""" - data_csv = _data_to_csv_string(data) - data_hash = _compute_data_hash(data_csv) - filename = filename.format(prefix=prefix, hash=data_hash, extension=extension) - with open(filename, "w") as f: - f.write(data_csv) - return {"url": os.path.join(urlpath, filename), "format": {"type": "csv"}} - - -@curried.curry -def to_values(data): - """Replace a DataFrame by a data model with values.""" - check_data_type(data) - if hasattr(data, "__geo_interface__"): - if isinstance(data, pd.DataFrame): - data = sanitize_dataframe(data) - data = sanitize_geo_interface(data.__geo_interface__) - return {"values": data} - elif isinstance(data, pd.DataFrame): - data = sanitize_dataframe(data) - return {"values": data.to_dict(orient="records")} - elif isinstance(data, dict): - if "values" not in data: - raise KeyError("values expected in data dict, but not present.") - return data - elif hasattr(data, "__dataframe__"): - # experimental interchange dataframe support - pi = import_pyarrow_interchange() - pa_table = pi.from_dataframe(data) - return {"values": pa_table.to_pylist()} - - -def check_data_type(data): - """Raise if the data is not a dict or DataFrame.""" - if not isinstance(data, (dict, pd.DataFrame)) and not any( - hasattr(data, attr) for attr in ["__geo_interface__", "__dataframe__"] - ): - raise TypeError( - "Expected dict, DataFrame or a __geo_interface__ attribute, got: {}".format( - type(data) - ) - ) - - -# ============================================================================== -# Private utilities -# ============================================================================== - - -def _compute_data_hash(data_str): - return hashlib.md5(data_str.encode()).hexdigest() - - -def _data_to_json_string(data): - """Return a JSON string representation of the input data""" - check_data_type(data) - if hasattr(data, "__geo_interface__"): - if isinstance(data, pd.DataFrame): - data = sanitize_dataframe(data) - data = sanitize_geo_interface(data.__geo_interface__) - return json.dumps(data) - elif isinstance(data, pd.DataFrame): - data = sanitize_dataframe(data) - return data.to_json(orient="records", double_precision=15) - elif isinstance(data, dict): - if "values" not in data: - raise KeyError("values expected in data dict, but not present.") - return json.dumps(data["values"], sort_keys=True) - elif hasattr(data, "__dataframe__"): - # experimental interchange dataframe support - pi = import_pyarrow_interchange() - pa_table = pi.from_dataframe(data) - return json.dumps(pa_table.to_pylist()) - else: - raise NotImplementedError( - "to_json only works with data expressed as " "a DataFrame or as a dict" - ) - - -def _data_to_csv_string(data): - """return a CSV string representation of the input data""" - check_data_type(data) - if hasattr(data, "__geo_interface__"): - raise NotImplementedError( - "to_csv does not work with data that " - "contains the __geo_interface__ attribute" - ) - elif isinstance(data, pd.DataFrame): - data = sanitize_dataframe(data) - return 
data.to_csv(index=False) - elif isinstance(data, dict): - if "values" not in data: - raise KeyError("values expected in data dict, but not present") - return pd.DataFrame.from_dict(data["values"]).to_csv(index=False) - elif hasattr(data, "__dataframe__"): - # experimental interchange dataframe support - pi = import_pyarrow_interchange() - import pyarrow as pa - import pyarrow.csv as pa_csv - - pa_table = pi.from_dataframe(data) - csv_buffer = pa.BufferOutputStream() - pa_csv.write_csv(pa_table, csv_buffer) - return csv_buffer.getvalue().to_pybytes().decode() - else: - raise NotImplementedError( - "to_csv only works with data expressed as " "a DataFrame or as a dict" - ) - - -def pipe(data, *funcs): - """ - Pipe a value through a sequence of functions - - Deprecated: use toolz.curried.pipe() instead. - """ - warnings.warn( - "alt.pipe() is deprecated, and will be removed in a future release. " - "Use toolz.curried.pipe() instead.", - AltairDeprecationWarning, - stacklevel=1, - ) - return curried.pipe(data, *funcs) - - -def curry(*args, **kwargs): - """Curry a callable function - - Deprecated: use toolz.curried.curry() instead. - """ - warnings.warn( - "alt.curry() is deprecated, and will be removed in a future release. " - "Use toolz.curried.curry() instead.", - AltairDeprecationWarning, - stacklevel=1, - ) - return curried.curry(*args, **kwargs) - - -def import_pyarrow_interchange(): - import pkg_resources - - try: - pkg_resources.require("pyarrow>=11.0.0") - # The package is installed and meets the minimum version requirement - import pyarrow.interchange as pi - - return pi - except pkg_resources.DistributionNotFound as err: - # The package is not installed - raise ImportError( - "Usage of the DataFrame Interchange Protocol requires the package 'pyarrow', but it is not installed." - ) from err - except pkg_resources.VersionConflict as err: - # The package is installed but does not meet the minimum version requirement - raise ImportError( - "The installed version of 'pyarrow' does not meet the minimum requirement of version 11.0.0. " - "Please update 'pyarrow' to use the DataFrame Interchange Protocol." 
- ) from err diff --git a/spaces/Sup3r/Image-Upscaling-Playground/app.py b/spaces/Sup3r/Image-Upscaling-Playground/app.py deleted file mode 100644 index 1f3736667bfd4e5ac6d9ee2ef9b95416cb80f9c0..0000000000000000000000000000000000000000 --- a/spaces/Sup3r/Image-Upscaling-Playground/app.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import cv2 -import onnxruntime -import gradio as gr - - -def pre_process(img: np.array) -> np.array: - # H, W, C -> C, H, W - img = np.transpose(img[:, :, 0:3], (2, 0, 1)) - # C, H, W -> 1, C, H, W - img = np.expand_dims(img, axis=0).astype(np.float32) - return img - - -def post_process(img: np.array) -> np.array: - # 1, C, H, W -> C, H, W - img = np.squeeze(img) - # C, H, W -> H, W, C - img = np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8) - return img - - -def inference(model_path: str, img_array: np.array) -> np.array: - options = onnxruntime.SessionOptions() - options.intra_op_num_threads = 1 - options.inter_op_num_threads = 1 - ort_session = onnxruntime.InferenceSession(model_path, options) - ort_inputs = {ort_session.get_inputs()[0].name: img_array} - ort_outs = ort_session.run(None, ort_inputs) - - return ort_outs[0] - - -def convert_pil_to_cv2(image): - # pil_image = image.convert("RGB") - open_cv_image = np.array(image) - # RGB to BGR - open_cv_image = open_cv_image[:, :, ::-1].copy() - return open_cv_image - - -def upscale(image, model): - model_path = f"models/{model}.ort" - img = convert_pil_to_cv2(image) - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - - if img.shape[2] == 4: - alpha = img[:, :, 3] # GRAY - alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR) # BGR - alpha_output = post_process(inference(model_path, pre_process(alpha))) # BGR - alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY) # GRAY - - img = img[:, :, 0:3] # BGR - image_output = post_process(inference(model_path, pre_process(img))) # BGR - image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA) # BGRA - image_output[:, :, 3] = alpha_output - - elif img.shape[2] == 3: - image_output = post_process(inference(model_path, pre_process(img))) # BGR - - return image_output - - -css = ".output-image, .input-image, .image-preview {height: 480px !important} " -model_choices = ["modelx2", "modelx2 25 JXL", "modelx4", "minecraft_modelx4"] - -gr.Interface( - fn=upscale, - inputs=[ - gr.inputs.Image(type="pil", label="Input Image"), - gr.inputs.Radio( - model_choices, - type="value", - default=None, - label="Choose Upscaler", - optional=False, - ), - ], - outputs="image", - title="Image Upscaling 🦆", - description="Model: [Anchor-based Plain Net for Mobile Image Super-Resolution](https://arxiv.org/abs/2105.09750). 
Repository: [SR Mobile PyTorch](https://github.com/w11wo/sr_mobile_pytorch)", - allow_flagging="never", - css=css, -).launch() diff --git a/spaces/Supedsa/rvc-models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/Supedsa/rvc-models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index ee3171bcb7c4a5066560723108b56e055f18be45..0000000000000000000000000000000000000000 --- a/spaces/Supedsa/rvc-models/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/visualization/color.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/visualization/color.py deleted file mode 100644 index 9041e0e6b7581c3356795d6a3c5e84667c88f025..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/visualization/color.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
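# Illustrative calls to color_val() defined below (all values are BGR tuples):
#     color_val('green')      -> (0, 255, 0)
#     color_val(Color.red)    -> (0, 0, 255)
#     color_val((255, 0, 0))  -> (255, 0, 0)   # tuples are passed through
#     color_val(128)          -> (128, 128, 128)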
-from enum import Enum - -import numpy as np - -from annotator.uniformer.mmcv.utils import is_str - - -class Color(Enum): - """An enum that defines common colors. - - Contains red, green, blue, cyan, yellow, magenta, white and black. - """ - red = (0, 0, 255) - green = (0, 255, 0) - blue = (255, 0, 0) - cyan = (255, 255, 0) - yellow = (0, 255, 255) - magenta = (255, 0, 255) - white = (255, 255, 255) - black = (0, 0, 0) - - -def color_val(color): - """Convert various input to color tuples. - - Args: - color (:obj:`Color`/str/tuple/int/ndarray): Color inputs - - Returns: - tuple[int]: A tuple of 3 integers indicating BGR channels. - """ - if is_str(color): - return Color[color].value - elif isinstance(color, Color): - return color.value - elif isinstance(color, tuple): - assert len(color) == 3 - for channel in color: - assert 0 <= channel <= 255 - return color - elif isinstance(color, int): - assert 0 <= color <= 255 - return color, color, color - elif isinstance(color, np.ndarray): - assert color.ndim == 1 and color.size == 3 - assert np.all((color >= 0) & (color <= 255)) - color = color.astype(np.uint8) - return tuple(color) - else: - raise TypeError(f'Invalid type for color: {type(color)}') diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py deleted file mode 100644 index ef0b6d16d4403fb5d16a3aeb71a22621a0be5e21..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py +++ /dev/null @@ -1,29 +0,0 @@ -from .mask_rcnn_R_50_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) -from detectron2.config import LazyCall as L -from detectron2.modeling.backbone import RegNet -from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock - -# Config source: -# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py # noqa -model.backbone.bottom_up = L(RegNet)( - stem_class=SimpleStem, - stem_width=32, - block_class=ResBottleneckBlock, - depth=23, - w_a=38.65, - w_0=96, - w_m=2.43, - group_width=40, - norm="SyncBN", - out_features=["s1", "s2", "s3", "s4"], -) -model.pixel_std = [57.375, 57.120, 58.395] - -# RegNets benefit from enabling cudnn benchmark mode -train.cudnn_benchmark = True diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py deleted file mode 100644 index a8714f7990f11e146a01e03d108518e0356b50c4..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import numpy as np -from typing import List, Optional, Union -import torch - -from detectron2.config import configurable - -from . import detection_utils as utils -from . import transforms as T - -""" -This file contains the default mapping that's applied to "dataset dicts". 
-""" - -__all__ = ["DatasetMapper"] - - -class DatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by the model. - - This is the default callable to be used to map your dataset dict into training data. - You may need to follow it to implement your own one for customized logic, - such as a different way to read or transform images. - See :doc:`/tutorials/data_loading` for details. - - The callable currently does the following: - - 1. Read the image from "file_name" - 2. Applies cropping/geometric transforms to the image and annotations - 3. Prepare data and annotations to Tensor and :class:`Instances` - """ - - @configurable - def __init__( - self, - is_train: bool, - *, - augmentations: List[Union[T.Augmentation, T.Transform]], - image_format: str, - use_instance_mask: bool = False, - use_keypoint: bool = False, - instance_mask_format: str = "polygon", - keypoint_hflip_indices: Optional[np.ndarray] = None, - precomputed_proposal_topk: Optional[int] = None, - recompute_boxes: bool = False, - ): - """ - NOTE: this interface is experimental. - - Args: - is_train: whether it's used in training or inference - augmentations: a list of augmentations or deterministic transforms to apply - image_format: an image format supported by :func:`detection_utils.read_image`. - use_instance_mask: whether to process instance segmentation annotations, if available - use_keypoint: whether to process keypoint annotations if available - instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation - masks into this format. - keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` - precomputed_proposal_topk: if given, will load pre-computed - proposals from dataset_dict and keep the top k proposals for each image. - recompute_boxes: whether to overwrite bounding box annotations - by computing tight bounding boxes from instance mask annotations. 
- """ - if recompute_boxes: - assert use_instance_mask, "recompute_boxes requires instance masks" - # fmt: off - self.is_train = is_train - self.augmentations = T.AugmentationList(augmentations) - self.image_format = image_format - self.use_instance_mask = use_instance_mask - self.instance_mask_format = instance_mask_format - self.use_keypoint = use_keypoint - self.keypoint_hflip_indices = keypoint_hflip_indices - self.proposal_topk = precomputed_proposal_topk - self.recompute_boxes = recompute_boxes - # fmt: on - logger = logging.getLogger(__name__) - mode = "training" if is_train else "inference" - logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") - - @classmethod - def from_config(cls, cfg, is_train: bool = True): - augs = utils.build_augmentation(cfg, is_train) - if cfg.INPUT.CROP.ENABLED and is_train: - augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) - recompute_boxes = cfg.MODEL.MASK_ON - else: - recompute_boxes = False - - ret = { - "is_train": is_train, - "augmentations": augs, - "image_format": cfg.INPUT.FORMAT, - "use_instance_mask": cfg.MODEL.MASK_ON, - "instance_mask_format": cfg.INPUT.MASK_FORMAT, - "use_keypoint": cfg.MODEL.KEYPOINT_ON, - "recompute_boxes": recompute_boxes, - } - - if cfg.MODEL.KEYPOINT_ON: - ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) - - if cfg.MODEL.LOAD_PROPOSALS: - ret["precomputed_proposal_topk"] = ( - cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN - if is_train - else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST - ) - return ret - - def _transform_annotations(self, dataset_dict, transforms, image_shape): - # USER: Modify this if you want to keep them for some reason. - for anno in dataset_dict["annotations"]: - if not self.use_instance_mask: - anno.pop("segmentation", None) - if not self.use_keypoint: - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - annos = [ - utils.transform_instance_annotations( - obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices - ) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances( - annos, image_shape, mask_format=self.instance_mask_format - ) - - # After transforms such as cropping are applied, the bounding box may no longer - # tightly bound the object. As an example, imagine a triangle object - # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight - # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to - # the intersection of original bounding box and the cropping box. - if self.recompute_boxes: - instances.gt_boxes = instances.gt_masks.get_bounding_boxes() - dataset_dict["instances"] = utils.filter_empty_instances(instances) - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - # USER: Write your own image loading if it's not from a file - image = utils.read_image(dataset_dict["file_name"], format=self.image_format) - utils.check_image_size(dataset_dict, image) - - # USER: Remove if you don't do semantic/panoptic segmentation. 
- if "sem_seg_file_name" in dataset_dict: - sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) - else: - sem_seg_gt = None - - aug_input = T.AugInput(image, sem_seg=sem_seg_gt) - transforms = self.augmentations(aug_input) - image, sem_seg_gt = aug_input.image, aug_input.sem_seg - - image_shape = image.shape[:2] # h, w - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - if sem_seg_gt is not None: - dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) - - # USER: Remove if you don't use pre-computed proposals. - # Most users would not need this feature. - if self.proposal_topk is not None: - utils.transform_proposals( - dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk - ) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - dataset_dict.pop("sem_seg_file_name", None) - return dataset_dict - - if "annotations" in dataset_dict: - self._transform_annotations(dataset_dict, transforms, image_shape) - - return dataset_dict diff --git a/spaces/Um124/Global_Warming_Analysis/pages/Oil Consumption data Analysis.py b/spaces/Um124/Global_Warming_Analysis/pages/Oil Consumption data Analysis.py deleted file mode 100644 index 075fd45a54d7b1e218caad800d1cfb9f6e0ae7f0..0000000000000000000000000000000000000000 --- a/spaces/Um124/Global_Warming_Analysis/pages/Oil Consumption data Analysis.py +++ /dev/null @@ -1,86 +0,0 @@ -import pandas as pd -import numpy as np -import plotly.express as px -import streamlit as st - - -st.set_page_config( - page_title='Oil Consumption data Analysis', - page_icon='📈', - layout='wide' -) - - -Years=['1965','1966','1967','1968','1969','1970','1971','1972','1973','1974','1975','1976','1977','1978', -'1979','1980','1981','1982','1983','1984','1985','1986','1987','1988','1989','1990','1991','1992','1993', -'1994','1995','1996','1997','1998','1999','2000','2001','2002','2003','2004','2005','2006','2007','2008', -'2009','2010','2011','2012','2013','2014','2015','2016'] - -@st.cache_data -def load_data(): - df=pd.read_csv('data/oil_consumption_per_cap.csv') - df.rename(columns={'geo':'Country'},inplace=True) - df.set_index('Country',inplace=True) - df['Total'] = df[Years].sum(axis=1) - df['Avgrage']=df.mean(axis=1) - df['Maximum']=df.max(axis=1) - df['Minimum']=df.min(axis=1) - df.sort_index(inplace=True) - return df - -st.title('Oil Consumption per Capital') -df = load_data() -st.dataframe(df,use_container_width=True) - -countries= df.index.unique().tolist() -Graphs = ['bar','pie','line','area','funnel'] -c1,c2 = st.columns(2) -country = c1.selectbox("Select a Country", countries) -Graph = c2.selectbox("Select a Graph type", Graphs) - -st.header("Country wise visualization") -cdf = df.loc[country,Years].reset_index() -cdf.rename({'index':'Years'},axis=1, inplace=True) -if Graph == Graphs[0]: - fig = px.bar(cdf, 'Years',country, title=f'{country} Oil Consumption per Capital') -if Graph == Graphs[1]: - fig = px.pie(cdf, 'Years',country, title=f'{country} Oil Consumption per Capital') -if Graph == Graphs[2]: - fig = px.line(cdf, 'Years',country, title=f'{country} Oil Consumption per Capital') -if Graph == Graphs[3]: - fig = px.area(cdf, 'Years',country, title=f'{country} Oil 
Consumption per Capital') -if Graph == Graphs[4]: - fig = px.funnel(cdf, 'Years',country, title=f'{country} Oil Consumption per Capital') -st.plotly_chart(fig, use_container_width=True) - -st.header("Comparison of Countries") -clist = st.multiselect("Select countries to compare", countries, default='India') -cdf = df.loc[clist, Years].T # T to rotate the data in 90deg -st.write(cdf) -figc = px.line(cdf,cdf.index, clist, title=f'Comparing {", ".join(clist)}') - -st.plotly_chart(figc, use_container_width=True) - -df.sort_values(by='Total', ascending=False, inplace=True) -fig1=px.bar(df, x=df.index, y='Total',title='Total Oil Consumption per Capital') -st.plotly_chart(fig1, use_container_width=True) - -dfavg = df.sort_values(by='Avgrage').reset_index() -dfavg.rename({'index':'Country'},axis=1,inplace=True) -fig2=px.bar(dfavg, 'Country', 'Avgrage', title="Avgrage Oil Consumption per Capital by Country") -st.plotly_chart(fig2, use_container_width=True) - -dfmax=df.sort_values(by='Maximum').reset_index() -dfmax.rename({'index':'Country'},axis=1,inplace=True) -fig3=px.bar(dfmax,'Country','Maximum',title='Maximum Oil Consumption per Capital by the Country') -st.plotly_chart(fig3, use_container_width=True) - -dfmin=df.sort_values(by='Minimum').reset_index() -dfmin.rename({'index':'Country'},axis=1,inplace=True) -fig4=px.bar(dfmin,'Country','Minimum',title='Minimum Oil Consumption per Capital by the Country' ) -st.plotly_chart(fig4,use_container_width=True) - -dfcomp=df.sort_values(by='Country',ascending=False,inplace=True) -fig5 = px.line(df, x=df.index, y='Maximum',title='Maximum and Minimum Oil Consumption per Capital comparisons') -fig5.add_scatter(x=df.index, y=df['Minimum'], mode='lines',) -st.plotly_chart(fig5, use_container_width=True) \ No newline at end of file diff --git a/spaces/VIPLab/Track-Anything/tracker/inference/memory_manager.py b/spaces/VIPLab/Track-Anything/tracker/inference/memory_manager.py deleted file mode 100644 index d47d96e400ba6050e6bb4325cdb21a1c3a25edc6..0000000000000000000000000000000000000000 --- a/spaces/VIPLab/Track-Anything/tracker/inference/memory_manager.py +++ /dev/null @@ -1,286 +0,0 @@ -import torch -import warnings - -from inference.kv_memory_store import KeyValueMemoryStore -from model.memory_util import * - - -class MemoryManager: - """ - Manages all three memory stores and the transition between working/long-term memory - """ - def __init__(self, config): - self.hidden_dim = config['hidden_dim'] - self.top_k = config['top_k'] - - self.enable_long_term = config['enable_long_term'] - self.enable_long_term_usage = config['enable_long_term_count_usage'] - if self.enable_long_term: - self.max_mt_frames = config['max_mid_term_frames'] - self.min_mt_frames = config['min_mid_term_frames'] - self.num_prototypes = config['num_prototypes'] - self.max_long_elements = config['max_long_term_elements'] - - # dimensions will be inferred from input later - self.CK = self.CV = None - self.H = self.W = None - - # The hidden state will be stored in a single tensor for all objects - # B x num_objects x CH x H x W - self.hidden = None - - self.work_mem = KeyValueMemoryStore(count_usage=self.enable_long_term) - if self.enable_long_term: - self.long_mem = KeyValueMemoryStore(count_usage=self.enable_long_term_usage) - - self.reset_config = True - - def update_config(self, config): - self.reset_config = True - self.hidden_dim = config['hidden_dim'] - self.top_k = config['top_k'] - - assert self.enable_long_term == config['enable_long_term'], 'cannot update this' - assert 
self.enable_long_term_usage == config['enable_long_term_count_usage'], 'cannot update this' - - self.enable_long_term_usage = config['enable_long_term_count_usage'] - if self.enable_long_term: - self.max_mt_frames = config['max_mid_term_frames'] - self.min_mt_frames = config['min_mid_term_frames'] - self.num_prototypes = config['num_prototypes'] - self.max_long_elements = config['max_long_term_elements'] - - def _readout(self, affinity, v): - # this function is for a single object group - return v @ affinity - - def match_memory(self, query_key, selection): - # query_key: B x C^k x H x W - # selection: B x C^k x H x W - num_groups = self.work_mem.num_groups - h, w = query_key.shape[-2:] - - query_key = query_key.flatten(start_dim=2) - selection = selection.flatten(start_dim=2) if selection is not None else None - - """ - Memory readout using keys - """ - - if self.enable_long_term and self.long_mem.engaged(): - # Use long-term memory - long_mem_size = self.long_mem.size - memory_key = torch.cat([self.long_mem.key, self.work_mem.key], -1) - shrinkage = torch.cat([self.long_mem.shrinkage, self.work_mem.shrinkage], -1) - - similarity = get_similarity(memory_key, shrinkage, query_key, selection) - work_mem_similarity = similarity[:, long_mem_size:] - long_mem_similarity = similarity[:, :long_mem_size] - - # get the usage with the first group - # the first group always have all the keys valid - affinity, usage = do_softmax( - torch.cat([long_mem_similarity[:, -self.long_mem.get_v_size(0):], work_mem_similarity], 1), - top_k=self.top_k, inplace=True, return_usage=True) - affinity = [affinity] - - # compute affinity group by group as later groups only have a subset of keys - for gi in range(1, num_groups): - if gi < self.long_mem.num_groups: - # merge working and lt similarities before softmax - affinity_one_group = do_softmax( - torch.cat([long_mem_similarity[:, -self.long_mem.get_v_size(gi):], - work_mem_similarity[:, -self.work_mem.get_v_size(gi):]], 1), - top_k=self.top_k, inplace=True) - else: - # no long-term memory for this group - affinity_one_group = do_softmax(work_mem_similarity[:, -self.work_mem.get_v_size(gi):], - top_k=self.top_k, inplace=(gi==num_groups-1)) - affinity.append(affinity_one_group) - - all_memory_value = [] - for gi, gv in enumerate(self.work_mem.value): - # merge the working and lt values before readout - if gi < self.long_mem.num_groups: - all_memory_value.append(torch.cat([self.long_mem.value[gi], self.work_mem.value[gi]], -1)) - else: - all_memory_value.append(gv) - - """ - Record memory usage for working and long-term memory - """ - # ignore the index return for long-term memory - work_usage = usage[:, long_mem_size:] - self.work_mem.update_usage(work_usage.flatten()) - - if self.enable_long_term_usage: - # ignore the index return for working memory - long_usage = usage[:, :long_mem_size] - self.long_mem.update_usage(long_usage.flatten()) - else: - # No long-term memory - similarity = get_similarity(self.work_mem.key, self.work_mem.shrinkage, query_key, selection) - - if self.enable_long_term: - affinity, usage = do_softmax(similarity, inplace=(num_groups==1), - top_k=self.top_k, return_usage=True) - - # Record memory usage for working memory - self.work_mem.update_usage(usage.flatten()) - else: - affinity = do_softmax(similarity, inplace=(num_groups==1), - top_k=self.top_k, return_usage=False) - - affinity = [affinity] - - # compute affinity group by group as later groups only have a subset of keys - for gi in range(1, num_groups): - affinity_one_group = 
do_softmax(similarity[:, -self.work_mem.get_v_size(gi):], - top_k=self.top_k, inplace=(gi==num_groups-1)) - affinity.append(affinity_one_group) - - all_memory_value = self.work_mem.value - - # Shared affinity within each group - all_readout_mem = torch.cat([ - self._readout(affinity[gi], gv) - for gi, gv in enumerate(all_memory_value) - ], 0) - - return all_readout_mem.view(all_readout_mem.shape[0], self.CV, h, w) - - def add_memory(self, key, shrinkage, value, objects, selection=None): - # key: 1*C*H*W - # value: 1*num_objects*C*H*W - # objects contain a list of object indices - if self.H is None or self.reset_config: - self.reset_config = False - self.H, self.W = key.shape[-2:] - self.HW = self.H*self.W - if self.enable_long_term: - # convert from num. frames to num. nodes - self.min_work_elements = self.min_mt_frames*self.HW - self.max_work_elements = self.max_mt_frames*self.HW - - # key: 1*C*N - # value: num_objects*C*N - key = key.flatten(start_dim=2) - shrinkage = shrinkage.flatten(start_dim=2) - value = value[0].flatten(start_dim=2) - - self.CK = key.shape[1] - self.CV = value.shape[1] - - if selection is not None: - if not self.enable_long_term: - warnings.warn('the selection factor is only needed in long-term mode', UserWarning) - selection = selection.flatten(start_dim=2) - - self.work_mem.add(key, value, shrinkage, selection, objects) - - # long-term memory cleanup - if self.enable_long_term: - # Do memory compressed if needed - if self.work_mem.size >= self.max_work_elements: - # print('remove memory') - # Remove obsolete features if needed - if self.long_mem.size >= (self.max_long_elements-self.num_prototypes): - self.long_mem.remove_obsolete_features(self.max_long_elements-self.num_prototypes) - - self.compress_features() - - def create_hidden_state(self, n, sample_key): - # n is the TOTAL number of objects - h, w = sample_key.shape[-2:] - if self.hidden is None: - self.hidden = torch.zeros((1, n, self.hidden_dim, h, w), device=sample_key.device) - elif self.hidden.shape[1] != n: - self.hidden = torch.cat([ - self.hidden, - torch.zeros((1, n-self.hidden.shape[1], self.hidden_dim, h, w), device=sample_key.device) - ], 1) - - assert(self.hidden.shape[1] == n) - - def set_hidden(self, hidden): - self.hidden = hidden - - def get_hidden(self): - return self.hidden - - def compress_features(self): - HW = self.HW - candidate_value = [] - total_work_mem_size = self.work_mem.size - for gv in self.work_mem.value: - # Some object groups might be added later in the video - # So not all keys have values associated with all objects - # We need to keep track of the key->value validity - mem_size_in_this_group = gv.shape[-1] - if mem_size_in_this_group == total_work_mem_size: - # full LT - candidate_value.append(gv[:,:,HW:-self.min_work_elements+HW]) - else: - # mem_size is smaller than total_work_mem_size, but at least HW - assert HW <= mem_size_in_this_group < total_work_mem_size - if mem_size_in_this_group > self.min_work_elements+HW: - # part of this object group still goes into LT - candidate_value.append(gv[:,:,HW:-self.min_work_elements+HW]) - else: - # this object group cannot go to the LT at all - candidate_value.append(None) - - # perform memory consolidation - prototype_key, prototype_value, prototype_shrinkage = self.consolidation( - *self.work_mem.get_all_sliced(HW, -self.min_work_elements+HW), candidate_value) - - # remove consolidated working memory - self.work_mem.sieve_by_range(HW, -self.min_work_elements+HW, min_size=self.min_work_elements+HW) - - # add to long-term memory 
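Before the consolidated features are written out below, it may help to see the readout pattern that both match_memory and consolidation rely on: values are combined with a softmax affinity by a plain matrix product. A standalone sketch with made-up shapes (the real code filters the softmax with top-k via do_softmax):

```python
import torch

C_v, N, HW = 512, 1000, 30 * 54              # value channels, memory slots, query pixels (made up)
value = torch.randn(1, C_v, N)               # one object group: B x C_v x N
similarity = torch.randn(1, N, HW)           # key/query similarity: B x N x HW
affinity = torch.softmax(similarity, dim=1)  # each query pixel's weights over memory slots sum to 1
readout = value @ affinity                   # B x C_v x HW — the same `v @ affinity` as _readout()
```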
- self.long_mem.add(prototype_key, prototype_value, prototype_shrinkage, selection=None, objects=None) - # print(f'long memory size: {self.long_mem.size}') - # print(f'work memory size: {self.work_mem.size}') - - def consolidation(self, candidate_key, candidate_shrinkage, candidate_selection, usage, candidate_value): - # keys: 1*C*N - # values: num_objects*C*N - N = candidate_key.shape[-1] - - # find the indices with max usage - _, max_usage_indices = torch.topk(usage, k=self.num_prototypes, dim=-1, sorted=True) - prototype_indices = max_usage_indices.flatten() - - # Prototypes are invalid for out-of-bound groups - validity = [prototype_indices >= (N-gv.shape[2]) if gv is not None else None for gv in candidate_value] - - prototype_key = candidate_key[:, :, prototype_indices] - prototype_selection = candidate_selection[:, :, prototype_indices] if candidate_selection is not None else None - - """ - Potentiation step - """ - similarity = get_similarity(candidate_key, candidate_shrinkage, prototype_key, prototype_selection) - - # convert similarity to affinity - # need to do it group by group since the softmax normalization would be different - affinity = [ - do_softmax(similarity[:, -gv.shape[2]:, validity[gi]]) if gv is not None else None - for gi, gv in enumerate(candidate_value) - ] - - # some values can be have all False validity. Weed them out. - affinity = [ - aff if aff is None or aff.shape[-1] > 0 else None for aff in affinity - ] - - # readout the values - prototype_value = [ - self._readout(affinity[gi], gv) if affinity[gi] is not None else None - for gi, gv in enumerate(candidate_value) - ] - - # readout the shrinkage term - prototype_shrinkage = self._readout(affinity[0], candidate_shrinkage) if candidate_shrinkage is not None else None - - return prototype_key, prototype_value, prototype_shrinkage \ No newline at end of file diff --git a/spaces/Veer15/image-prompt-editing/README.md b/spaces/Veer15/image-prompt-editing/README.md deleted file mode 100644 index c7661a37b8b5f8b9f2d064fff0a09e7441965c12..0000000000000000000000000000000000000000 --- a/spaces/Veer15/image-prompt-editing/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Panel InstructPix2Pix -emoji: 🏃 -colorFrom: purple -colorTo: purple -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/You.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/You.py deleted file mode 100644 index 02a2774ce62bae33612a73272d584dc2acaf3eb0..0000000000000000000000000000000000000000 --- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/You.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -import json -import time -import subprocess - -from ...typing import sha256, Dict, get_type_hints - -url = 'https://you.com' -model = 'gpt-3.5-turbo' -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - - path = os.path.dirname(os.path.realpath(__file__)) - config = json.dumps({ - 'messages': messages}, separators=(',', ':')) - - cmd = ['python3', f'{path}/helpers/you.py', config] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - for line in iter(p.stdout.readline, b''): - yield line.decode('utf-8') #[:-1] \ No newline at end of file diff --git a/spaces/Vipitis/ShaderEval/app.py b/spaces/Vipitis/ShaderEval/app.py deleted file mode 100644 index 
8080c1b5e91f455edeaf72588e63937d8edd84ef..0000000000000000000000000000000000000000 --- a/spaces/Vipitis/ShaderEval/app.py +++ /dev/null @@ -1,70 +0,0 @@ -import gradio as gr -import evaluate - -suite = evaluate.EvaluationSuite.load("Vipitis/ShaderEval") #downloads it - -#TODO: can you import it locally instead? -# from ShaderEval import Suite -# suite = Suite("Vipitis/ShaderEval") -# save resutls to a file? - -text = """# Welcome to the ShaderEval Suite. - - This space hosts the ShaderEval Suite. more to follow soon. - For an interactive Demo and more information see the demo space [ShaderCoder](https://huggingface.co/spaces/Vipitis/ShaderCoder) - - # Task1: Return Completion - ## Explanation - Modelled after the [CodeXGLUE code_completion_line](https://huggingface.co/datasets/code_x_glue_cc_code_completion_line) task. - Using the "return_completion" subset of the [Shadertoys-fine dataset](https://huggingface.co/datasets/Vipitis/Shadertoys-fine). - All preprocessing and post proessing is done by the custom evaluator for this suite. It should be as easy as just giving it a model checkpoint that can do the "text-generation" task. - Evaluated is currently with just [exact_match](https://huggingface.co/metrics/exact_match). - - ## Notice - should you find any model that throws an error, please let me know in the issues tab. Several parts of this suite are still missing. - - ## Instructions - ### Run the code yourself:. - ```python - import evaluate - suite = evaluate.EvaluationSuite.load("Vipitis/ShaderEval") - model_cp = "gpt2" - suite.run(model_cp, snippet=300) - ``` - - ### try the demo below - - Select a **model checkpoint** from the "dropdown" - - Select how many **samples** to run (there us up to 300 from the test set) - - Click **Run** to run the suite - - The results will be displayed in the **Output** box - - ## Results - ![](file/bar.png) - Additionally, you can report results to your models and it should show up on this [leaderboard](https://huggingface.co/spaces/autoevaluate/leaderboards?dataset=Vipitis%2FShadertoys-fine) - - ## Todo (feel free to contribute in a [Pull Request](https://huggingface.co/spaces/Vipitis/ShaderEval/discussions?status=open&type=pull_request)) - - [~] leaderboard (via autoevaluate and self reporting) - - [?] supporting batches to speed up inference - - [ ] CER metric (via a custom metric perhaps?) - - [x] removing the pad_token warning - - [ ] adding OpenVINO pipelines for inference, pending on OpenVINO release - - [ ] task1b for "better" featuring a improved testset as well as better metrics. Will allow more generation parameters - - [ ] semantic match by comparing the rendered frames (depending on WGPU implementation?) - - [ ] CLIP match to evaluate rendered images fitting to title/description - """ - - -def run_suite(model_cp, snippet): - # print(model_cp, snippet) - results = suite.run(model_cp, snippet) - print(results) # so they show up in the logs for me. 
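The score printed here comes from the exact_match metric mentioned in the description above; a tiny self-contained check of what that metric computes (toy return statements, not Shadertoys data):

```python
import evaluate

exact_match = evaluate.load("exact_match")
predictions = ["return length(p) - r;", "return x * 2.0;"]
references  = ["return length(p) - r;", "return x + 2.0;"]
print(exact_match.compute(predictions=predictions, references=references))
# {'exact_match': 0.5} — one of the two generated lines matches its reference exactly
```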
- return results[0] - -with gr.Blocks() as site: - text_md = gr.Markdown(text) - model_cp = gr.Textbox(value="gpt2", label="Model Checkpoint", interactive=True) - first_n = gr.Slider(minimum=1, maximum=300, default=5, label="num_samples", step=1.0) - output = gr.Textbox(label="Output") - run_button = gr.Button(label="Run") - run_button.click(fn=run_suite, inputs=[model_cp, first_n], outputs=output) -site.launch() diff --git a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/python/dqn/dqn.py b/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/python/dqn/dqn.py deleted file mode 100644 index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000 --- a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/test_project/python/dqn/dqn.py +++ /dev/null @@ -1,245 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import numpy as np -import torch as th -from torch.nn import functional as F - -from stable_baselines3.common import logger -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.preprocessing import maybe_transpose -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update -from stable_baselines3.dqn.policies import DQNPolicy - - -class DQN(OffPolicyAlgorithm): - """ - Deep Q-Network (DQN) - - Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236 - Default hyperparameters are taken from the nature paper, - except for the optimizer and learning rate that were taken from Stable Baselines defaults. - - :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) - :param env: The environment to learn from (if registered in Gym, can be str) - :param learning_rate: The learning rate, it can be a function - of the current progress remaining (from 1 to 0) - :param buffer_size: size of the replay buffer - :param learning_starts: how many steps of the model to collect transitions for before learning starts - :param batch_size: Minibatch size for each gradient update - :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update - :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit - like ``(5, "step")`` or ``(2, "episode")``. - :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) - Set to ``-1`` means to do as many gradient steps as steps done in the environment - during the rollout. - :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer - at a cost of more complexity. - See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - :param target_update_interval: update the target network every ``target_update_interval`` - environment steps. - :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced - :param exploration_initial_eps: initial value of random action probability - :param exploration_final_eps: final value of random action probability - :param max_grad_norm: The maximum value for the gradient clipping - :param tensorboard_log: the log location for tensorboard (if None, no logging) - :param create_eval_env: Whether to create a second environment that will be - used for evaluating the agent periodically. 
(Only available when passing string for the environment) - :param policy_kwargs: additional arguments to be passed to the policy on creation - :param verbose: the verbosity level: 0 no output, 1 info, 2 debug - :param seed: Seed for the pseudo random generators - :param device: Device (cpu, cuda, ...) on which the code should be run. - Setting it to auto, the code will be run on the GPU if possible. - :param _init_setup_model: Whether or not to build the network at the creation of the instance - """ - - def __init__( - self, - policy: Union[str, Type[DQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, - learning_starts: int = 50000, - batch_size: Optional[int] = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 0, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): - - super(DQN, self).__init__( - policy, - env, - DQNPolicy, - learning_rate, - buffer_size, - learning_starts, - batch_size, - tau, - gamma, - train_freq, - gradient_steps, - action_noise=None, # No action noise - policy_kwargs=policy_kwargs, - tensorboard_log=tensorboard_log, - verbose=verbose, - device=device, - create_eval_env=create_eval_env, - seed=seed, - sde_support=False, - optimize_memory_usage=optimize_memory_usage, - supported_action_spaces=(gym.spaces.Discrete,), - ) - - self.exploration_initial_eps = exploration_initial_eps - self.exploration_final_eps = exploration_final_eps - self.exploration_fraction = exploration_fraction - self.target_update_interval = target_update_interval - self.max_grad_norm = max_grad_norm - # "epsilon" for the epsilon-greedy exploration - self.exploration_rate = 0.0 - # Linear schedule will be defined in `_setup_model()` - self.exploration_schedule = None - self.q_net, self.q_net_target = None, None - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - super(DQN, self)._setup_model() - self._create_aliases() - self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction - ) - - def _create_aliases(self) -> None: - self.q_net = self.policy.q_net - self.q_net_target = self.policy.q_net_target - - def _on_step(self) -> None: - """ - Update the exploration rate and target network if needed. - This method is called in ``collect_rollouts()`` after each step in the environment. 
- """ - if self.num_timesteps % self.target_update_interval == 0: - polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau) - - self.exploration_rate = self.exploration_schedule(self._current_progress_remaining) - logger.record("rollout/exploration rate", self.exploration_rate) - - def train(self, gradient_steps: int, batch_size: int = 100) -> None: - # Update learning rate according to schedule - self._update_learning_rate(self.policy.optimizer) - - losses = [] - for _ in range(gradient_steps): - # Sample replay buffer - replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) - - with th.no_grad(): - # Compute the next Q-values using the target network - next_q_values = self.q_net_target(replay_data.next_observations) - # Follow greedy policy: use the one with the highest value - next_q_values, _ = next_q_values.max(dim=1) - # Avoid potential broadcast issue - next_q_values = next_q_values.reshape(-1, 1) - # 1-step TD target - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates - current_q_values = self.q_net(replay_data.observations) - - # Retrieve the q-values for the actions from the replay buffer - current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) - - # Compute Huber loss (less sensitive to outliers) - loss = F.smooth_l1_loss(current_q_values, target_q_values) - losses.append(loss.item()) - - # Optimize the policy - self.policy.optimizer.zero_grad() - loss.backward() - # Clip gradient norm - th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) - self.policy.optimizer.step() - - # Increase update counter - self._n_updates += gradient_steps - - logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/loss", np.mean(losses)) - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """ - Overrides the base_class predict function to include epsilon-greedy exploration. - - :param observation: the input observation - :param state: The last states (can be None, used in recurrent policies) - :param mask: The last masks (can be None, used in recurrent policies) - :param deterministic: Whether or not to return deterministic actions. 
- :return: the model's action and the next state - (used in recurrent policies) - """ - if not deterministic and np.random.rand() < self.exploration_rate: - if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space): - n_batch = observation.shape[0] - action = np.array([self.action_space.sample() for _ in range(n_batch)]) - else: - action = np.array(self.action_space.sample()) - else: - action, state = self.policy.predict(observation, state, mask, deterministic) - return action, state - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "DQN", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> OffPolicyAlgorithm: - - return super(DQN, self).learn( - total_timesteps=total_timesteps, - callback=callback, - log_interval=log_interval, - eval_env=eval_env, - eval_freq=eval_freq, - n_eval_episodes=n_eval_episodes, - tb_log_name=tb_log_name, - eval_log_path=eval_log_path, - reset_num_timesteps=reset_num_timesteps, - ) - - def _excluded_save_params(self) -> List[str]: - return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"] - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - state_dicts = ["policy", "policy.optimizer"] - - return state_dicts, [] diff --git a/spaces/XzJosh/Ava2-Bert-VITS2/attentions.py b/spaces/XzJosh/Ava2-Bert-VITS2/attentions.py deleted file mode 100644 index 1192dd7268c20c11010e73a6017ed09549695afe..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Ava2-Bert-VITS2/attentions.py +++ /dev/null @@ -1,344 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import logging - -logger = logging.getLogger(__name__) - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - #if isflow: - # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - # self.cond_layer = weight_norm(cond_layer, name='weight') - # self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - 
self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - logging.debug(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - 
super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
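Setting aside the relative-position terms, proximal bias and masks, the core of attention() above is standard scaled dot-product attention; a shape-only sketch with made-up dimensions:

```python
import math
import torch

b, n_heads, t, d_k = 2, 4, 16, 48                    # batch, heads, time steps, channels per head
q = torch.randn(b, n_heads, t, d_k)
k = torch.randn(b, n_heads, t, d_k)
v = torch.randn(b, n_heads, t, d_k)

scores = (q / math.sqrt(d_k)) @ k.transpose(-2, -1)  # b x h x t_t x t_s
p_attn = scores.softmax(dim=-1)                      # weights over source positions
out = p_attn @ v                                     # b x h x t_t x d_k, later merged back to b x d x t
```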
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Yina/google-pix2struct-base/README.md b/spaces/Yina/google-pix2struct-base/README.md deleted file mode 100644 index 41b91e65a3288247d366948bf4dd8b88d09eada1..0000000000000000000000000000000000000000 --- a/spaces/Yina/google-pix2struct-base/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Google Pix2struct Base -emoji: 🏢 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/grit/custom_solver.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/grit/custom_solver.py deleted file mode 100644 index 87f7d61ed756acf9326b7ab4097a989a9e6c7532..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/grit/custom_solver.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -# Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/custom_solver.py -import itertools -from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union -import torch - -from detectron2.config import CfgNode - -from detectron2.solver.build import maybe_add_gradient_clipping - - -def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: - params: List[Dict[str, Any]] = [] - memo: Set[torch.nn.parameter.Parameter] = set() - optimizer_type = cfg.SOLVER.OPTIMIZER - - for key, value in model.named_parameters(recurse=True): - if not value.requires_grad: - continue - # Avoid duplicating parameters - if value in memo: - continue - memo.add(value) - lr = cfg.SOLVER.BASE_LR - weight_decay = cfg.SOLVER.WEIGHT_DECAY - - if cfg.SOLVER.VIT_LAYER_DECAY: - lr = lr * get_vit_lr_decay_rate(key, cfg.SOLVER.VIT_LAYER_DECAY_RATE, cfg.MODEL.VIT_LAYERS) - - param = {"params": [value], "lr": lr} - if optimizer_type != 'ADAMW': - param['weight_decay'] = weight_decay - params += [param] - - def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class - # detectron2 doesn't have full model gradient clipping now - clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE - enable = ( - cfg.SOLVER.CLIP_GRADIENTS.ENABLED - and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" - and clip_norm_val > 0.0 - ) - - class FullModelGradientClippingOptimizer(optim): - def step(self, closure=None): - all_params = itertools.chain(*[x["params"] for x in self.param_groups]) - torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) - super().step(closure=closure) - - return FullModelGradientClippingOptimizer if enable else optim - - - if optimizer_type == 'SGD': - optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( - params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, - nesterov=cfg.SOLVER.NESTEROV - ) - elif optimizer_type == 'ADAMW': - optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( - params, cfg.SOLVER.BASE_LR, - weight_decay=cfg.SOLVER.WEIGHT_DECAY - ) - else: - raise NotImplementedError(f"no optimizer type {optimizer_type}") - if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": - optimizer = maybe_add_gradient_clipping(cfg, optimizer) - return optimizer - - -def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12): - """ - Calculate lr decay rate for different ViT blocks. - Args: - name (string): parameter name. - lr_decay_rate (float): base lr decay rate. - num_layers (int): number of ViT blocks. - - Returns: - lr decay rate for the given parameter. - """ - layer_id = num_layers + 1 - if name.startswith("backbone"): - if ".pos_embed" in name or ".patch_embed" in name: - layer_id = 0 - elif ".blocks." in name and ".residual." not in name: - layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1 - - return lr_decay_rate ** (num_layers + 1 - layer_id) \ No newline at end of file diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/vision.cpp b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/vision.cpp deleted file mode 100644 index c9a2cd4f20e6f58be1c5783d67c64232dd59b560..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/vision.cpp +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
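The layer-wise multipliers produced by get_vit_lr_decay_rate above are easy to check by hand; a quick sketch assuming a decay rate of 0.9 and 12 ViT blocks (the parameter names are illustrative, not taken from a real checkpoint):

```python
for name in (
    "backbone.net.pos_embed",                     # patch/pos embeddings -> layer_id 0
    "backbone.net.blocks.0.attn.qkv.weight",      # first block          -> layer_id 1
    "backbone.net.blocks.11.mlp.fc1.weight",      # last block           -> layer_id 12
    "roi_heads.box_predictor.cls_score.weight",   # outside the backbone -> multiplier 1.0
):
    print(name, get_vit_lr_decay_rate(name, lr_decay_rate=0.9, num_layers=12))
# multipliers come out as 0.9**13, 0.9**12, 0.9**1 and 0.9**0 respectively
```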
- -#include -#include "ROIAlignRotated/ROIAlignRotated.h" -#include "box_iou_rotated/box_iou_rotated.h" -#include "cocoeval/cocoeval.h" -#include "deformable/deform_conv.h" -#include "nms_rotated/nms_rotated.h" - -namespace detectron2 { - -#if defined(WITH_CUDA) || defined(WITH_HIP) -extern int get_cudart_version(); -#endif - -std::string get_cuda_version() { -#if defined(WITH_CUDA) || defined(WITH_HIP) - std::ostringstream oss; - -#if defined(WITH_CUDA) - oss << "CUDA "; -#else - oss << "HIP "; -#endif - - // copied from - // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 - auto printCudaStyleVersion = [&](int v) { - oss << (v / 1000) << "." << (v / 10 % 100); - if (v % 10 != 0) { - oss << "." << (v % 10); - } - }; - printCudaStyleVersion(get_cudart_version()); - return oss.str(); -#else // neither CUDA nor HIP - return std::string("not available"); -#endif -} - -bool has_cuda() { -#if defined(WITH_CUDA) - return true; -#else - return false; -#endif -} - -// similar to -// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp -std::string get_compiler_version() { - std::ostringstream ss; -#if defined(__GNUC__) -#ifndef __clang__ - -#if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8)) -#error "GCC >= 4.9 is required!" -#endif - - { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } -#endif -#endif - -#if defined(__clang_major__) - { - ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." - << __clang_patchlevel__; - } -#endif - -#if defined(_MSC_VER) - { ss << "MSVC " << _MSC_FULL_VER; } -#endif - return ss.str(); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); - m.def("get_cuda_version", &get_cuda_version, "get_cuda_version"); - m.def("has_cuda", &has_cuda, "has_cuda"); - - m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); - m.def( - "deform_conv_backward_input", - &deform_conv_backward_input, - "deform_conv_backward_input"); - m.def( - "deform_conv_backward_filter", - &deform_conv_backward_filter, - "deform_conv_backward_filter"); - m.def( - "modulated_deform_conv_forward", - &modulated_deform_conv_forward, - "modulated_deform_conv_forward"); - m.def( - "modulated_deform_conv_backward", - &modulated_deform_conv_backward, - "modulated_deform_conv_backward"); - - m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); - m.def( - "COCOevalEvaluateImages", - &COCOeval::EvaluateImages, - "COCOeval::EvaluateImages"); - pybind11::class_(m, "InstanceAnnotation") - .def(pybind11::init()); - pybind11::class_(m, "ImageEvaluation") - .def(pybind11::init<>()); -} - -TORCH_LIBRARY(detectron2, m) { - m.def("nms_rotated", &nms_rotated); - m.def("box_iou_rotated", &box_iou_rotated); - m.def("roi_align_rotated_forward", &ROIAlignRotated_forward); - m.def("roi_align_rotated_backward", &ROIAlignRotated_backward); -} -} // namespace detectron2 diff --git a/spaces/Yuliang/ECON/lib/pymafx/utils/io.py b/spaces/Yuliang/ECON/lib/pymafx/utils/io.py deleted file mode 100644 index 67d5b50542ef8f831ac59d46947631ae8f2cc78e..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/pymafx/utils/io.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -############################################################################## -"""IO utilities.""" - -from __future__ import ( - absolute_import, - division, - print_function, - unicode_literals, -) - -import hashlib -import logging -import os -import re -import sys - -from six.moves import cPickle as pickle - -try: - from urllib.request import urlopen -except ImportError: #python2 - from urllib2 import urlopen - -logger = logging.getLogger(__name__) - -_DETECTRON_S3_BASE_URL = 'https://s3-us-west-2.amazonaws.com/detectron' - - -def save_object(obj, file_name): - """Save a Python object by pickling it.""" - file_name = os.path.abspath(file_name) - with open(file_name, 'wb') as f: - pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) - - -def cache_url(url_or_file, cache_dir): - """Download the file specified by the URL to the cache_dir and return the - path to the cached file. If the argument is not a URL, simply return it as - is. - """ - is_url = re.match(r'^(?:http)s?://', url_or_file, re.IGNORECASE) is not None - - if not is_url: - return url_or_file - - url = url_or_file - # assert url.startswith(_DETECTRON_S3_BASE_URL), \ - # ('Detectron only automatically caches URLs in the Detectron S3 ' - # 'bucket: {}').format(_DETECTRON_S3_BASE_URL) - # - # cache_file_path = url.replace(_DETECTRON_S3_BASE_URL, cache_dir) - Len_filename = len(url.split('/')[-1]) - BASE_URL = url[0:-Len_filename - 1] - # - cache_file_path = url.replace(BASE_URL, cache_dir) - if os.path.exists(cache_file_path): - # assert_cache_file_is_ok(url, cache_file_path) - return cache_file_path - - cache_file_dir = os.path.dirname(cache_file_path) - if not os.path.exists(cache_file_dir): - os.makedirs(cache_file_dir) - - logger.info('Downloading remote file {} to {}'.format(url, cache_file_path)) - download_url(url, cache_file_path) - # assert_cache_file_is_ok(url, cache_file_path) - return cache_file_path - - -def assert_cache_file_is_ok(url, file_path): - """Check that cache file has the correct hash.""" - # File is already in the cache, verify that the md5sum matches and - # return local path - cache_file_md5sum = _get_file_md5sum(file_path) - ref_md5sum = _get_reference_md5sum(url) - assert cache_file_md5sum == ref_md5sum, \ - ('Target URL {} appears to be downloaded to the local cache file ' - '{}, but the md5 hash of the local file does not match the ' - 'reference (actual: {} vs. expected: {}). You may wish to delete ' - 'the cached file and try again to trigger automatic ' - 'download.').format(url, file_path, cache_file_md5sum, ref_md5sum) - - -def _progress_bar(count, total): - """Report download progress. 
- Credit: - https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113 - """ - bar_len = 60 - filled_len = int(round(bar_len * count / float(total))) - - percents = round(100.0 * count / float(total), 1) - bar = '=' * filled_len + '-' * (bar_len - filled_len) - - sys.stdout.write(' [{}] {}% of {:.1f}MB file \r'.format(bar, percents, total / 1024 / 1024)) - sys.stdout.flush() - if count >= total: - sys.stdout.write('\n') - - -def download_url(url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar): - """Download url and write it to dst_file_path. - Credit: - https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook - """ - response = urlopen(url) - total_size = response.info().getheader('Content-Length').strip() - total_size = int(total_size) - bytes_so_far = 0 - - with open(dst_file_path, 'wb') as f: - while 1: - chunk = response.read(chunk_size) - bytes_so_far += len(chunk) - if not chunk: - break - if progress_hook: - progress_hook(bytes_so_far, total_size) - f.write(chunk) - - return bytes_so_far - - -def _get_file_md5sum(file_name): - """Compute the md5 hash of a file.""" - hash_obj = hashlib.md5() - with open(file_name, 'r') as f: - hash_obj.update(f.read()) - return hash_obj.hexdigest() - - -def _get_reference_md5sum(url): - """By convention the md5 hash for url is stored in url + '.md5sum'.""" - url_md5sum = url + '.md5sum' - md5sum = urlopen(url_md5sum).read().strip() - return md5sum diff --git a/spaces/YumingYuan/Latex_OCR/app.py b/spaces/YumingYuan/Latex_OCR/app.py deleted file mode 100644 index 798a7a6fe6e51f4405daee7211958181d29454af..0000000000000000000000000000000000000000 --- a/spaces/YumingYuan/Latex_OCR/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio as gr -from PIL import Image -from pix2tex.cli import LatexOCR - -# Receives an image and returns the latex code and the markdown code -def img_latex(image): - img = Image.open(image) - model = LatexOCR() - latex = model(img) - markdown = latex - markdown = r'$$' + latex + r'$$' - return latex, markdown - - -gr.Interface( - img_latex, - inputs=gr.inputs.Image(label="Upload", type="filepath"), - outputs=[gr.outputs.Textbox(label="Latex"),gr.outputs.Textbox(label="Markdown")], - title="Latex OCR", -).launch() diff --git a/spaces/ZJunTvT/ZJunChat/modules/models.py b/spaces/ZJunTvT/ZJunChat/modules/models.py deleted file mode 100644 index 25b18b1904910e183a997a763008403d960868d6..0000000000000000000000000000000000000000 --- a/spaces/ZJunTvT/ZJunChat/modules/models.py +++ /dev/null @@ -1,625 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import commentjson as cjson -import os -import sys -import requests -import urllib3 -import platform -import base64 -from io import BytesIO -from PIL import Image - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp -from enum import Enum -import uuid - -from .presets import * -from .llama_func import * -from .utils import * -from . 
import shared -from .config import retrieve_proxy -from modules import config -from .base_model import BaseLLMModel, ModelType - - -class OpenAIClient(BaseLLMModel): - def __init__( - self, - model_name, - api_key, - system_prompt=INITIAL_SYSTEM_PROMPT, - temperature=1.0, - top_p=1.0, - ) -> None: - super().__init__( - model_name=model_name, - temperature=temperature, - top_p=top_p, - system_prompt=system_prompt, - ) - self.api_key = api_key - self.need_api_key = True - self._refresh_header() - - def get_answer_stream_iter(self): - response = self._get_response(stream=True) - if response is not None: - iter = self._decode_chat_response(response) - partial_text = "" - for i in iter: - partial_text += i - yield partial_text - else: - yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG - - def get_answer_at_once(self): - response = self._get_response() - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - total_token_count = response["usage"]["total_tokens"] - return content, total_token_count - - def count_token(self, user_input): - input_token_count = count_token(construct_user(user_input)) - if self.system_prompt is not None and len(self.all_token_counts) == 0: - system_prompt_token_count = count_token( - construct_system(self.system_prompt) - ) - return input_token_count + system_prompt_token_count - return input_token_count - - def billing_info(self): - try: - curr_time = datetime.datetime.now() - last_day_of_month = get_last_day_of_month( - curr_time).strftime("%Y-%m-%d") - first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") - usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" - try: - usage_data = self._get_billing_data(usage_url) - except Exception as e: - logging.error(f"获取API使用情况失败:" + str(e)) - return i18n("**获取API使用情况失败**") - rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) - return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" - except requests.exceptions.ConnectTimeout: - status_text = ( - STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - ) - return status_text - except requests.exceptions.ReadTimeout: - status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - return status_text - except Exception as e: - import traceback - traceback.print_exc() - logging.error(i18n("获取API使用情况失败:") + str(e)) - return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG - - def set_token_upper_limit(self, new_upper_limit): - pass - - @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 - def _get_response(self, stream=False): - openai_api_key = self.api_key - system_prompt = self.system_prompt - history = self.history - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - if system_prompt is not None: - history = [construct_system(system_prompt), *history] - - payload = { - "model": self.model_name, - "messages": history, - "temperature": self.temperature, - "top_p": self.top_p, - "n": self.n_choices, - "stream": stream, - "presence_penalty": self.presence_penalty, - "frequency_penalty": self.frequency_penalty, - } - - if self.max_generation_token is not None: - payload["max_tokens"] = self.max_generation_token - if self.stop_sequence is not None: - payload["stop"] = self.stop_sequence - if self.logit_bias is not None: - payload["logit_bias"] = self.logit_bias - if self.user_identifier is not None: - payload["user"] = 
self.user_identifier - - if stream: - timeout = TIMEOUT_STREAMING - else: - timeout = TIMEOUT_ALL - - # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 - if shared.state.completion_url != COMPLETION_URL: - logging.info(f"使用自定义API URL: {shared.state.completion_url}") - - with retrieve_proxy(): - try: - response = requests.post( - shared.state.completion_url, - headers=headers, - json=payload, - stream=stream, - timeout=timeout, - ) - except: - return None - return response - - def _refresh_header(self): - self.headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.api_key}", - } - - def _get_billing_data(self, billing_url): - with retrieve_proxy(): - response = requests.get( - billing_url, - headers=self.headers, - timeout=TIMEOUT_ALL, - ) - - if response.status_code == 200: - data = response.json() - return data - else: - raise Exception( - f"API request failed with status code {response.status_code}: {response.text}" - ) - - def _decode_chat_response(self, response): - error_msg = "" - for chunk in response.iter_lines(): - if chunk: - chunk = chunk.decode() - chunk_length = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") - error_msg += chunk - continue - if chunk_length > 6 and "delta" in chunk["choices"][0]: - if chunk["choices"][0]["finish_reason"] == "stop": - break - try: - yield chunk["choices"][0]["delta"]["content"] - except Exception as e: - # logging.error(f"Error: {e}") - continue - if error_msg: - raise Exception(error_msg) - - def set_key(self, new_access_key): - ret = super().set_key(new_access_key) - self._refresh_header() - return ret - - -class ChatGLM_Client(BaseLLMModel): - def __init__(self, model_name) -> None: - super().__init__(model_name=model_name) - from transformers import AutoTokenizer, AutoModel - import torch - global CHATGLM_TOKENIZER, CHATGLM_MODEL - if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None: - system_name = platform.system() - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"THUDM/{model_name}" - CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained( - model_source, trust_remote_code=True - ) - quantified = False - if "int4" in model_name: - quantified = True - model = AutoModel.from_pretrained( - model_source, trust_remote_code=True - ) - if torch.cuda.is_available(): - # run on CUDA - logging.info("CUDA is available, using CUDA") - model = model.half().cuda() - # mps加速还存在一些问题,暂时不使用 - elif system_name == "Darwin" and model_path is not None and not quantified: - logging.info("Running on macOS, using MPS") - # running on macOS and model already downloaded - model = model.half().to("mps") - else: - logging.info("GPU is not available, using CPU") - model = model.float() - model = model.eval() - CHATGLM_MODEL = model - - def _get_glm_style_input(self): - history = [x["content"] for x in self.history] - query = history.pop() - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - assert ( - len(history) % 2 == 0 - ), f"History should be even length. 
current history is: {history}" - history = [[history[i], history[i + 1]] - for i in range(0, len(history), 2)] - return history, query - - def get_answer_at_once(self): - history, query = self._get_glm_style_input() - response, _ = CHATGLM_MODEL.chat( - CHATGLM_TOKENIZER, query, history=history) - return response, len(response) - - def get_answer_stream_iter(self): - history, query = self._get_glm_style_input() - for response, history in CHATGLM_MODEL.stream_chat( - CHATGLM_TOKENIZER, - query, - history, - max_length=self.token_upper_limit, - top_p=self.top_p, - temperature=self.temperature, - ): - yield response - - -class LLaMA_Client(BaseLLMModel): - def __init__( - self, - model_name, - lora_path=None, - ) -> None: - super().__init__(model_name=model_name) - from lmflow.datasets.dataset import Dataset - from lmflow.pipeline.auto_pipeline import AutoPipeline - from lmflow.models.auto_model import AutoModel - from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments - - self.max_generation_token = 1000 - self.end_string = "\n\n" - # We don't need input data - data_args = DatasetArguments(dataset_path=None) - self.dataset = Dataset(data_args) - self.system_prompt = "" - - global LLAMA_MODEL, LLAMA_INFERENCER - if LLAMA_MODEL is None or LLAMA_INFERENCER is None: - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"decapoda-research/{model_name}" - # raise Exception(f"models目录下没有这个模型: {model_name}") - if lora_path is not None: - lora_path = f"lora/{lora_path}" - model_args = ModelArguments(model_name_or_path=model_source, lora_model_path=lora_path, model_type=None, config_overrides=None, config_name=None, tokenizer_name=None, cache_dir=None, - use_fast_tokenizer=True, model_revision='main', use_auth_token=False, torch_dtype=None, use_lora=False, lora_r=8, lora_alpha=32, lora_dropout=0.1, use_ram_optimized_load=True) - pipeline_args = InferencerArguments( - local_rank=0, random_seed=1, deepspeed='configs/ds_config_chatbot.json', mixed_precision='bf16') - - with open(pipeline_args.deepspeed, "r") as f: - ds_config = json.load(f) - LLAMA_MODEL = AutoModel.get_model( - model_args, - tune_strategy="none", - ds_config=ds_config, - ) - LLAMA_INFERENCER = AutoPipeline.get_pipeline( - pipeline_name="inferencer", - model_args=model_args, - data_args=data_args, - pipeline_args=pipeline_args, - ) - - def _get_llama_style_input(self): - history = [] - instruction = "" - if self.system_prompt: - instruction = (f"Instruction: {self.system_prompt}\n") - for x in self.history: - if x["role"] == "user": - history.append(f"{instruction}Input: {x['content']}") - else: - history.append(f"Output: {x['content']}") - context = "\n\n".join(history) - context += "\n\nOutput: " - return context - - def get_answer_at_once(self): - context = self._get_llama_style_input() - - input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [{"text": context}]} - ) - - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=self.max_generation_token, - temperature=self.temperature, - ) - - response = output_dataset.to_dict()["instances"][0]["text"] - return response, len(response) - - def get_answer_stream_iter(self): - context = self._get_llama_style_input() - partial_text = "" - step = 1 - for _ in range(0, self.max_generation_token, step): - 
input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [ - {"text": context + partial_text}]} - ) - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=step, - temperature=self.temperature, - ) - response = output_dataset.to_dict()["instances"][0]["text"] - if response == "" or response == self.end_string: - break - partial_text += response - yield partial_text - - -class XMChat(BaseLLMModel): - def __init__(self, api_key): - super().__init__(model_name="xmchat") - self.api_key = api_key - self.session_id = None - self.reset() - self.image_bytes = None - self.image_path = None - self.xm_history = [] - self.url = "https://xmbot.net/web" - self.last_conv_id = None - - def reset(self): - self.session_id = str(uuid.uuid4()) - self.last_conv_id = None - return [], "已重置" - - def image_to_base64(self, image_path): - # 打开并加载图片 - img = Image.open(image_path) - - # 获取图片的宽度和高度 - width, height = img.size - - # 计算压缩比例,以确保最长边小于4096像素 - max_dimension = 2048 - scale_ratio = min(max_dimension / width, max_dimension / height) - - if scale_ratio < 1: - # 按压缩比例调整图片大小 - new_width = int(width * scale_ratio) - new_height = int(height * scale_ratio) - img = img.resize((new_width, new_height), Image.ANTIALIAS) - - # 将图片转换为jpg格式的二进制数据 - buffer = BytesIO() - if img.mode == "RGBA": - img = img.convert("RGB") - img.save(buffer, format='JPEG') - binary_image = buffer.getvalue() - - # 对二进制数据进行Base64编码 - base64_image = base64.b64encode(binary_image).decode('utf-8') - - return base64_image - - def try_read_image(self, filepath): - def is_image_file(filepath): - # 判断文件是否为图片 - valid_image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"] - file_extension = os.path.splitext(filepath)[1].lower() - return file_extension in valid_image_extensions - - if is_image_file(filepath): - logging.info(f"读取图片文件: {filepath}") - self.image_bytes = self.image_to_base64(filepath) - self.image_path = filepath - else: - self.image_bytes = None - self.image_path = None - - def like(self): - if self.last_conv_id is None: - return "点赞失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "good" - } - response = requests.post(self.url, json=data) - return "👍点赞成功,,感谢反馈~" - - def dislike(self): - if self.last_conv_id is None: - return "点踩失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "bad" - } - response = requests.post(self.url, json=data) - return "👎点踩成功,感谢反馈~" - - def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot): - fake_inputs = real_inputs - display_append = "" - limited_context = False - return limited_context, fake_inputs, display_append, real_inputs, chatbot - - def handle_file_upload(self, files, chatbot): - """if the model accepts multi modal input, implement this function""" - if files: - for file in files: - if file.name: - logging.info(f"尝试读取图像: {file.name}") - self.try_read_image(file.name) - if self.image_path is not None: - chatbot = chatbot + [((self.image_path,), None)] - if self.image_bytes is not None: - logging.info("使用图片作为输入") - # XMChat的一轮对话中实际上只能处理一张图片 - self.reset() - conv_id = str(uuid.uuid4()) - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "imgbase64", - "data": self.image_bytes - } - response = requests.post(self.url, json=data) - response = json.loads(response.text) - logging.info(f"图片回复: {response['data']}") - return None, chatbot, None - - def get_answer_at_once(self): - question = 
self.history[-1]["content"] - conv_id = str(uuid.uuid4()) - self.last_conv_id = conv_id - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "text", - "data": question - } - response = requests.post(self.url, json=data) - try: - response = json.loads(response.text) - return response["data"], len(response["data"]) - except Exception as e: - return response.text, len(response.text) - - - - -def get_model( - model_name, - lora_model_path=None, - access_key=None, - temperature=None, - top_p=None, - system_prompt=None, -) -> BaseLLMModel: - msg = i18n("模型设置为了:") + f" {model_name}" - model_type = ModelType.get_type(model_name) - lora_selector_visibility = False - lora_choices = [] - dont_change_lora_selector = False - if model_type != ModelType.OpenAI: - config.local_embedding = True - # del current_model.model - model = None - try: - if model_type == ModelType.OpenAI: - logging.info(f"正在加载OpenAI模型: {model_name}") - model = OpenAIClient( - model_name=model_name, - api_key=access_key, - system_prompt=system_prompt, - temperature=temperature, - top_p=top_p, - ) - elif model_type == ModelType.ChatGLM: - logging.info(f"正在加载ChatGLM模型: {model_name}") - model = ChatGLM_Client(model_name) - elif model_type == ModelType.LLaMA and lora_model_path == "": - msg = f"现在请为 {model_name} 选择LoRA模型" - logging.info(msg) - lora_selector_visibility = True - if os.path.isdir("lora"): - lora_choices = get_file_names( - "lora", plain=True, filetypes=[""]) - lora_choices = ["No LoRA"] + lora_choices - elif model_type == ModelType.LLaMA and lora_model_path != "": - logging.info(f"正在加载LLaMA模型: {model_name} + {lora_model_path}") - dont_change_lora_selector = True - if lora_model_path == "No LoRA": - lora_model_path = None - msg += " + No LoRA" - else: - msg += f" + {lora_model_path}" - model = LLaMA_Client(model_name, lora_model_path) - elif model_type == ModelType.XMChat: - if os.environ.get("XMCHAT_API_KEY") != "": - access_key = os.environ.get("XMCHAT_API_KEY") - model = XMChat(api_key=access_key) - elif model_type == ModelType.Unknown: - raise ValueError(f"未知模型: {model_name}") - logging.info(msg) - except Exception as e: - logging.error(e) - msg = f"{STANDARD_ERROR_MSG}: {e}" - if dont_change_lora_selector: - return model, msg - else: - return model, msg, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility) - - -if __name__ == "__main__": - with open("config.json", "r") as f: - openai_api_key = cjson.load(f)["openai_api_key"] - # set logging level to debug - logging.basicConfig(level=logging.DEBUG) - # client = ModelManager(model_name="gpt-3.5-turbo", access_key=openai_api_key) - client = get_model(model_name="chatglm-6b-int4") - chatbot = [] - stream = False - # 测试账单功能 - logging.info(colorama.Back.GREEN + "测试账单功能" + colorama.Back.RESET) - logging.info(client.billing_info()) - # 测试问答 - logging.info(colorama.Back.GREEN + "测试问答" + colorama.Back.RESET) - question = "巴黎是中国的首都吗?" - for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试问答后history : {client.history}") - # 测试记忆力 - logging.info(colorama.Back.GREEN + "测试记忆力" + colorama.Back.RESET) - question = "我刚刚问了你什么问题?" 
- for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试记忆力后history : {client.history}") - # 测试重试功能 - logging.info(colorama.Back.GREEN + "测试重试功能" + colorama.Back.RESET) - for i in client.retry(chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"重试后history : {client.history}") - # # 测试总结功能 - # print(colorama.Back.GREEN + "测试总结功能" + colorama.Back.RESET) - # chatbot, msg = client.reduce_token_size(chatbot=chatbot) - # print(chatbot, msg) - # print(f"总结后history: {client.history}") diff --git a/spaces/abdvl/datahub_qa_bot/docs/api/restli/restli-overview.md b/spaces/abdvl/datahub_qa_bot/docs/api/restli/restli-overview.md deleted file mode 100644 index d8a81075263747254baa371c0c370d21caac5d47..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/api/restli/restli-overview.md +++ /dev/null @@ -1,1458 +0,0 @@ -# Rest.li API - -You can access basic documentation on the API endpoints by opening the `/restli/docs` endpoint in the browser. -``` -python -c "import webbrowser; webbrowser.open('http://localhost:8080/restli/docs', new=2)" -``` - -*Please note that because DataHub is in a period of rapid development, the APIs below are subject to change. - -#### Sample API Calls - -#### Ingesting Aspects - -To ingest individual aspects into DataHub, you can use the following CURL: - -```shell -curl --location --request POST 'http://localhost:8080/aspects?action=ingestProposal' \ ---header 'X-RestLi-Protocol-Version: 2.0.0' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "proposal" : { - "entityType": "dataset", - "entityUrn" : "urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)", - "changeType" : "UPSERT", - "aspectName" : "datasetUsageStatistics", - "aspect" : { - "value" : "{ \"timestampMillis\":1629840771000,\"uniqueUserCount\" : 10, \"totalSqlQueries\": 20, \"fieldCounts\": [ {\"fieldPath\": \"col1\", \"count\": 20}, {\"fieldPath\" : \"col2\", \"count\": 5} ]}", - "contentType": "application/json" - } - } -}' -``` - -Notice that you need to provide the target entity urn, the entity type, a change type (`UPSERT` + `DELETE` supported), -the aspect name, and a JSON-serialized aspect, which corresponds to the PDL schema defined for the aspect. - -For more examples of serialized aspect payloads, see [bootstrap_mce.json](https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/examples/mce_files/bootstrap_mce.json). - -#### Ingesting Entities (Legacy) - -> Note - we are deprecating support for ingesting Entities via Snapshots. Please see **Ingesting Aspects** above for the latest -> guidance around ingesting metadata into DataHub without defining or changing the legacy snapshot models. (e.g. using ConfigEntityRegistry) - -The Entity Snapshot Ingest endpoints allow you to ingest multiple aspects about a particular entity at the same time. 
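Each of the legacy examples below posts an entity snapshot to the same `/entities?action=ingest` endpoint, so the call is easy to script. The following is a minimal Python sketch, not an official client: it assumes the `requests` library and a GMS instance at `http://localhost:8080`, and the `ingest_snapshot` helper name is illustrative.

```
import requests

GMS = "http://localhost:8080"  # assumed local GMS address

def ingest_snapshot(snapshot_class, urn, aspects):
    """POST a legacy entity snapshot (e.g. com.linkedin.metadata.snapshot.CorpUserSnapshot)."""
    payload = {
        "entity": {
            "value": {
                snapshot_class: {
                    "urn": urn,
                    "aspects": aspects,
                }
            }
        }
    }
    response = requests.post(f"{GMS}/entities?action=ingest", json=payload)
    response.raise_for_status()

# Mirrors the "Create a user" curl example below.
ingest_snapshot(
    "com.linkedin.metadata.snapshot.CorpUserSnapshot",
    "urn:li:corpuser:footbarusername",
    [{"com.linkedin.identity.CorpUserInfo": {
        "active": True,
        "displayName": "Foo Bar",
        "fullName": "Foo Bar",
        "email": "fbar@linkedin.com",
    }}],
)
```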
- -##### Create a user - -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.CorpUserSnapshot":{ - "urn":"urn:li:corpuser:footbarusername", - "aspects":[ - { - "com.linkedin.identity.CorpUserInfo":{ - "active":true, - "displayName":"Foo Bar", - "fullName":"Foo Bar", - "email":"fbar@linkedin.com" - } - } - ] - } - } - } -}' -``` - -##### Create a group - -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.CorpGroupSnapshot":{ - "urn":"urn:li:corpGroup:dev", - "aspects":[ - { - "com.linkedin.identity.CorpGroupInfo":{ - "email":"dev@linkedin.com", - "admins":[ - "urn:li:corpUser:jdoe" - ], - "members":[ - "urn:li:corpUser:datahub", - "urn:li:corpUser:jdoe" - ], - "groups":[ - - ] - } - } - ] - } - } - } -}' -``` - -##### Create a dataset -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.DatasetSnapshot":{ - "urn":"urn:li:dataset:(urn:li:dataPlatform:foo,bar,PROD)", - "aspects":[ - { - "com.linkedin.common.Ownership":{ - "owners":[ - { - "owner":"urn:li:corpuser:fbar", - "type":"DATAOWNER" - } - ], - "lastModified":{ - "time":0, - "actor":"urn:li:corpuser:fbar" - } - } - }, - { - "com.linkedin.common.InstitutionalMemory":{ - "elements":[ - { - "url":"https://www.linkedin.com", - "description":"Sample doc", - "createStamp":{ - "time":0, - "actor":"urn:li:corpuser:fbar" - } - } - ] - } - }, - { - "com.linkedin.schema.SchemaMetadata":{ - "schemaName":"FooEvent", - "platform":"urn:li:dataPlatform:foo", - "version":0, - "created":{ - "time":0, - "actor":"urn:li:corpuser:fbar" - }, - "lastModified":{ - "time":0, - "actor":"urn:li:corpuser:fbar" - }, - "hash":"", - "platformSchema":{ - "com.linkedin.schema.KafkaSchema":{ - "documentSchema":"{\"type\":\"record\",\"name\":\"MetadataChangeEvent\",\"namespace\":\"com.linkedin.mxe\",\"doc\":\"Kafka event for proposing a metadata change for an entity.\",\"fields\":[{\"name\":\"auditHeader\",\"type\":{\"type\":\"record\",\"name\":\"KafkaAuditHeader\",\"namespace\":\"com.linkedin.avro2pegasus.events\",\"doc\":\"Header\"}}]}" - } - }, - "fields":[ - { - "fieldPath":"foo", - "description":"Bar", - "nativeDataType":"string", - "type":{ - "type":{ - "com.linkedin.schema.StringType":{ - - } - } - } - } - ] - } - } - ] - } - } - } -}' -``` - -##### Create a chart -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.ChartSnapshot":{ - "urn":"urn:li:chart:(looker,baz1)", - "aspects":[ - { - "com.linkedin.chart.ChartInfo":{ - "title":"Baz Chart 1", - "description":"Baz Chart 1", - "inputs":[ - { - "string":"urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)" - } - ], - "lastModified":{ - "created":{ - "time":0, - "actor":"urn:li:corpuser:jdoe" - }, - "lastModified":{ - "time":0, - "actor":"urn:li:corpuser:datahub" - } - } - } - } - ] - } - } - } -}' -``` - -##### Create a dashboard -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.DashboardSnapshot":{ - "urn":"urn:li:dashboard:(looker,baz)", - "aspects":[ - { - "com.linkedin.dashboard.DashboardInfo":{ - "title":"Baz Dashboard", - "description":"Baz Dashboard", - "charts":[ - "urn:li:chart:(looker,baz1)", - "urn:li:chart:(looker,baz2)" - ], - "lastModified":{ - "created":{ - "time":0, - 
"actor":"urn:li:corpuser:jdoe" - }, - "lastModified":{ - "time":0, - "actor":"urn:li:corpuser:datahub" - } - } - } - } - ] - } - } - } -}' -``` - -##### Create Tags - -To create a new tag called "Engineering", we can use the following curl. - -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.TagSnapshot":{ - "urn":"urn:li:tag:Engineering", - "aspects":[ - { - "com.linkedin.tag.TagProperties":{ - "name":"Engineering", - "description":"The tag will be assigned to all assets owned by the Eng org." - } - } - ] - } - } - } -}' -``` - -This tag can subsequently be associated with a Data Asset using the "Global Tags" aspect associated with each. For example, -to add a tag to a Dataset, you can use the following CURL: - -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.DatasetSnapshot":{ - "urn":"urn:li:dataset:(urn:li:dataPlatform:foo,bar,PROD)", - "aspects":[ - { - "com.linkedin.common.GlobalTags":{ - "tags":[ - { - "tag":"urn:li:tag:Engineering" - } - ] - } - } - ] - } - } - } -}' -``` - -And to add the tag to a field in a particular Dataset's schema, you can use a CURL to update the EditableSchemaMetadata Aspect: - -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.DatasetSnapshot":{ - "urn":"urn:li:dataset:(urn:li:dataPlatform:foo,bar,PROD)", - "aspects":[ - { - "com.linkedin.schema.EditableSchemaMetadata": { - "editableSchemaFieldInfo":[ - { - "fieldPath":"myFieldName", - "globalTags": { - "tags":[ - { - "tag":"urn:li:tag:Engineering" - } - ] - } - } - ] - } - } - ] - } - } - } -}' -``` - - -##### Soft Deleting an Entity - -DataHub uses a special "Status" aspect associated with each entity to represent the lifecycle state of an Entity. -To soft delete an entire Entity, you can use the special "Status" aspect. Note that soft deletion means that -an entity will not be discoverable via Search or Browse, but its entity page will still be viewable. - -For example, to delete a particular chart: - -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.ChartSnapshot":{ - "aspects":[ - { - "com.linkedin.common.Status":{ - "removed": true - } - } - ], - "urn":"urn:li:chart:(looker,baz1)" - } - } - } -}' -``` - -To re-enable the Entity, you can make a similar request: - -``` -curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{ - "entity":{ - "value":{ - "com.linkedin.metadata.snapshot.ChartSnapshot":{ - "aspects":[ - { - "com.linkedin.common.Status":{ - "removed": false - } - } - ], - "urn":"urn:li:chart:(looker,baz1)" - } - } - } -}' -``` - -To issue a hard delete or soft-delete, or undo a particular ingestion run, you can use the [DataHub CLI](docs/how/delete-metadata.md). 
- - -#### Retrieving Entity Aspects - -Simply curl the `entitiesV2` endpoint of GMS: - -``` -curl 'http://localhost:8080/entitiesV2/' -``` - -For example, to retrieve the latest aspects associated with the "SampleHdfsDataset" `Dataset`: - -``` -curl --header 'X-RestLi-Protocol-Version: 2.0.0' 'http://localhost:8080/entitiesV2/urn%3Ali%3Adataset%3A%28urn%3Ali%3AdataPlatform%3Ahdfs%2CSampleHdfsDataset%2CPROD%29' -``` - -**Example Response** - -```json -{ - "urn":"urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)", - "aspects":{ - "editableSchemaMetadata":{ - "name":"editableSchemaMetadata", - "version":0, - "value":{ - "created":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - }, - "editableSchemaFieldInfo":[ - { - "fieldPath":"shipment_info", - "globalTags":{ - "tags":[ - { - "tag":"urn:li:tag:Legacy" - } - ] - } - } - ], - "lastModified":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - } - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - }, - "browsePaths":{ - "name":"browsePaths", - "version":0, - "value":{ - "paths":[ - "/prod/hdfs/SampleHdfsDataset" - ] - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - }, - "datasetKey":{ - "name":"datasetKey", - "version":0, - "value":{ - "name":"SampleHdfsDataset", - "platform":"urn:li:dataPlatform:hdfs", - "origin":"PROD" - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - }, - "ownership":{ - "name":"ownership", - "version":0, - "value":{ - "owners":[ - { - "owner":"urn:li:corpuser:jdoe", - "type":"DATAOWNER" - }, - { - "owner":"urn:li:corpuser:datahub", - "type":"DATAOWNER" - } - ], - "lastModified":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - } - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - }, - "dataPlatformInstance":{ - "name":"dataPlatformInstance", - "version":0, - "value":{ - "platform":"urn:li:dataPlatform:hdfs" - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - }, - "institutionalMemory":{ - "name":"institutionalMemory", - "version":0, - "value":{ - "elements":[ - { - "createStamp":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - }, - "description":"Sample doc", - "url":"https://www.linkedin.com" - } - ] - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - }, - "schemaMetadata":{ - "name":"schemaMetadata", - "version":0, - "value":{ - "created":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - }, - "platformSchema":{ - "com.linkedin.schema.KafkaSchema":{ - "documentSchema":"{\"type\":\"record\",\"name\":\"SampleHdfsSchema\",\"namespace\":\"com.linkedin.dataset\",\"doc\":\"Sample HDFS dataset\",\"fields\":[{\"name\":\"field_foo\",\"type\":[\"string\"]},{\"name\":\"field_bar\",\"type\":[\"boolean\"]}]}" - } - }, - "lastModified":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - }, - "schemaName":"SampleHdfsSchema", - "fields":[ - { - "nullable":false, - "fieldPath":"shipment_info", - "description":"Shipment info description", - "isPartOfKey":false, - "type":{ - "type":{ - "com.linkedin.schema.RecordType":{ - - } - } - }, - "nativeDataType":"varchar(100)", - "recursive":false - }, - { - "nullable":false, - "fieldPath":"shipment_info.date", - "description":"Shipment info date description", - "isPartOfKey":false, - "type":{ - "type":{ - "com.linkedin.schema.DateType":{ - - } - } - }, - "nativeDataType":"Date", - "recursive":false - }, - { - 
"nullable":false, - "fieldPath":"shipment_info.target", - "description":"Shipment info target description", - "isPartOfKey":false, - "type":{ - "type":{ - "com.linkedin.schema.StringType":{ - - } - } - }, - "nativeDataType":"text", - "recursive":false - }, - { - "nullable":false, - "fieldPath":"shipment_info.destination", - "description":"Shipment info destination description", - "isPartOfKey":false, - "type":{ - "type":{ - "com.linkedin.schema.StringType":{ - - } - } - }, - "nativeDataType":"varchar(100)", - "recursive":false - }, - { - "nullable":false, - "fieldPath":"shipment_info.geo_info", - "description":"Shipment info geo_info description", - "isPartOfKey":false, - "type":{ - "type":{ - "com.linkedin.schema.RecordType":{ - - } - } - }, - "nativeDataType":"varchar(100)", - "recursive":false - }, - { - "nullable":false, - "fieldPath":"shipment_info.geo_info.lat", - "description":"Shipment info geo_info lat", - "isPartOfKey":false, - "type":{ - "type":{ - "com.linkedin.schema.NumberType":{ - - } - } - }, - "nativeDataType":"float", - "recursive":false - }, - { - "nullable":false, - "fieldPath":"shipment_info.geo_info.lng", - "description":"Shipment info geo_info lng", - "isPartOfKey":false, - "type":{ - "type":{ - "com.linkedin.schema.NumberType":{ - - } - } - }, - "nativeDataType":"float", - "recursive":false - } - ], - "version":0, - "hash":"", - "platform":"urn:li:dataPlatform:hdfs" - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - }, - "upstreamLineage":{ - "name":"upstreamLineage", - "version":0, - "value":{ - "upstreams":[ - { - "auditStamp":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - }, - "type":"TRANSFORMED", - "dataset":"urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)" - } - ] - }, - "created":{ - "actor":"urn:li:corpuser:UNKNOWN", - "time":1646245614843 - } - } - }, - "entityName":"dataset" -} -``` - -You can also optionally limit to specific aspects using the `aspects` query parameter: - -``` -curl 'http://localhost:8080/entitiesV2/?aspects=List(upstreamLineage)' -``` - -#### Retrieving Entities (Legacy) - -> Note that this method of retrieving entities is deprecated, as it uses the legacy Snapshot models. Please refer to the **Retriving Entity Aspects** section above for the -> latest guidance. - -The Entity Snapshot Get APIs allow to retrieve the latest version of each aspect associated with an Entity. - -In general, when reading entities by primary key (urn), you will use the general-purpose `entities` endpoints. 
To fetch by primary key (urn), you'll -issue a query of the following form: - -``` -curl 'http://localhost:8080/entities/' -``` - -##### Get a CorpUser - -``` -curl 'http://localhost:8080/entities/urn%3Ali%3Acorpuser%3Afbar' - -{ - "value":{ - "com.linkedin.metadata.snapshot.CorpUserSnapshot":{ - "urn":"urn:li:corpuser:fbar", - "aspects":[ - { - "com.linkedin.metadata.key.CorpUserKey":{ - "username":"fbar" - } - }, - { - "com.linkedin.identity.CorpUserInfo":{ - "active":true, - "fullName":"Foo Bar", - "displayName":"Foo Bar", - "email":"fbar@linkedin.com" - } - }, - { - "com.linkedin.identity.CorpUserEditableInfo":{ - - } - } - ] - } - } -} -``` - - -##### Get a CorpGroup - -``` -curl 'http://localhost:8080/entities/urn%3Ali%3AcorpGroup%3Adev' - -{ - "value":{ - "com.linkedin.metadata.snapshot.CorpGroupSnapshot":{ - "urn":"urn:li:corpGroup:dev", - "aspects":[ - { - "com.linkedin.metadata.key.CorpGroupKey":{ - "name":"dev" - } - }, - { - "com.linkedin.identity.CorpGroupInfo":{ - "groups":[ - - ], - "email":"dev@linkedin.com", - "admins":[ - "urn:li:corpUser:jdoe" - ], - "members":[ - "urn:li:corpUser:datahub", - "urn:li:corpUser:jdoe" - ] - } - } - ] - } - } -} -``` - -##### Get a Dataset -``` -curl 'http://localhost:8080/entities/urn%3Ali%3Adataset%3A(urn%3Ali%3AdataPlatform%3Afoo,bar,PROD)' - -{ - "value":{ - "com.linkedin.metadata.snapshot.DatasetSnapshot":{ - "urn":"urn:li:dataset:(urn:li:dataPlatform:foo,bar,PROD)", - "aspects":[ - { - "com.linkedin.metadata.key.DatasetKey":{ - "origin":"PROD", - "name":"bar", - "platform":"urn:li:dataPlatform:foo" - } - }, - { - "com.linkedin.common.InstitutionalMemory":{ - "elements":[ - { - "createStamp":{ - "actor":"urn:li:corpuser:fbar", - "time":0 - }, - "description":"Sample doc", - "url":"https://www.linkedin.com" - } - ] - } - }, - { - "com.linkedin.common.Ownership":{ - "owners":[ - { - "owner":"urn:li:corpuser:fbar", - "type":"DATAOWNER" - } - ], - "lastModified":{ - "actor":"urn:li:corpuser:fbar", - "time":0 - } - } - }, - { - "com.linkedin.schema.SchemaMetadata":{ - "created":{ - "actor":"urn:li:corpuser:fbar", - "time":0 - }, - "platformSchema":{ - "com.linkedin.schema.KafkaSchema":{ - "documentSchema":"{\"type\":\"record\",\"name\":\"MetadataChangeEvent\",\"namespace\":\"com.linkedin.mxe\",\"doc\":\"Kafka event for proposing a metadata change for an entity.\",\"fields\":[{\"name\":\"auditHeader\",\"type\":{\"type\":\"record\",\"name\":\"KafkaAuditHeader\",\"namespace\":\"com.linkedin.avro2pegasus.events\",\"doc\":\"Header\"}}]}" - } - }, - "lastModified":{ - "actor":"urn:li:corpuser:fbar", - "time":0 - }, - "schemaName":"FooEvent", - "fields":[ - { - "fieldPath":"foo", - "description":"Bar", - "type":{ - "type":{ - "com.linkedin.schema.StringType":{ - - } - } - }, - "nativeDataType":"string" - } - ], - "version":0, - "hash":"", - "platform":"urn:li:dataPlatform:foo" - } - }, - { - "com.linkedin.common.BrowsePaths":{ - "paths":[ - "/prod/foo/bar" - ] - } - }, - { - "com.linkedin.dataset.UpstreamLineage":{ - "upstreams":[ - { - "auditStamp":{ - "actor":"urn:li:corpuser:fbar", - "time":0 - }, - "type":"TRANSFORMED", - "dataset":"urn:li:dataset:(urn:li:dataPlatform:foo,barUp,PROD)" - } - ] - } - } - ] - } - } -} -``` - -##### Get a Chart -``` -curl 'http://localhost:8080/entities/urn%3Ali%3Achart%3A(looker,baz1)' - -{ - "value":{ - "com.linkedin.metadata.snapshot.ChartSnapshot":{ - "urn":"urn:li:chart:(looker,baz1)", - "aspects":[ - { - "com.linkedin.metadata.key.ChartKey":{ - "chartId":"baz1", - "dashboardTool":"looker" - } - }, - { - 
"com.linkedin.common.BrowsePaths":{ - "paths":[ - "/looker/baz1" - ] - } - }, - { - "com.linkedin.chart.ChartInfo":{ - "description":"Baz Chart 1", - "lastModified":{ - "created":{ - "actor":"urn:li:corpuser:jdoe", - "time":0 - }, - "lastModified":{ - "actor":"urn:li:corpuser:datahub", - "time":0 - } - }, - "title":"Baz Chart 1", - "inputs":[ - { - "string":"urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)" - } - ] - } - } - ] - } - } -} -``` - -##### Get a Dashboard -``` -curl 'http://localhost:8080/entities/urn%3Ali%3Adashboard%3A(looker,foo)' - -{ - "value":{ - "com.linkedin.metadata.snapshot.DashboardSnapshot":{ - "urn":"urn:li:dashboard:(looker,foo)", - "aspects":[ - { - "com.linkedin.metadata.key.DashboardKey":{ - "dashboardId":"foo", - "dashboardTool":"looker" - } - } - ] - } - } -} -``` - -##### Get a GlossaryTerm -``` -curl 'http://localhost:8080/entities/urn%3Ali%3AglossaryTerm%3A(instruments,instruments.FinancialInstrument_v1)' -{ - "value":{ - "com.linkedin.metadata.snapshot.GlossaryTermSnapshot":{ - "urn":"urn:li:glossaryTerm:instruments.FinancialInstrument_v1", - "ownership":{ - "owners":[ - { - "owner":"urn:li:corpuser:jdoe", - "type":"DATAOWNER" - } - ], - "lastModified":{ - "actor":"urn:li:corpuser:jdoe", - "time":1581407189000 - } - }, - "glossaryTermInfo":{ - "definition":"written contract that gives rise to both a financial asset of one entity and a financial liability of another entity", - "customProperties":{ - "FQDN":"full" - }, - "sourceRef":"FIBO", - "sourceUrl":"https://spec.edmcouncil.org/fibo/ontology/FBC/FinancialInstruments/FinancialInstruments/FinancialInstrument", - "termSource":"EXTERNAL" - } - } - } -} -``` - -##### Browse an Entity - -To browse (explore) for an Entity of a particular type (e.g. dataset, chart, etc), you can use the following query format: - -``` -curl -X POST 'http://localhost:8080/entities?action=browse' \ ---data '{ - "path": "", - "entity": "", - "start": 0, - "limit": 10 -}' -``` - -For example, to browse the "charts" entity, you could use the following query: - -``` -curl -X POST 'http://localhost:8080/entities?action=browse' \ ---data '{ - "path": "/looker", - "entity": "chart", - "start": 0, - "limit": 10 -}' - -{ - "value":{ - "numEntities":1, - "pageSize":1, - "metadata":{ - "totalNumEntities":1, - "groups":[ - - ], - "path":"/looker" - }, - "from":0, - "entities":[ - { - "name":"baz1", - "urn":"urn:li:chart:(looker,baz1)" - } - ] - } -} -``` - -##### Search an Entity - -To search for an Entity of a particular type (e.g. dataset, chart, etc), you can use the following query format: - -``` -curl -X POST 'http://localhost:8080/entities?action=search' \ ---data '{ - "input": "", - "entity": "", - "start": 0, - "count": 10 -}' -``` - -The API will return a list of URNs that matched your search query. 
- -For example, to search the "charts" entity, you could use the following query: - -``` -curl -X POST 'http://localhost:8080/entities?action=search' \ ---data '{ - "input": "looker", - "entity": "chart", - "start": 0, - "count": 10 -}' - -{ - "value":{ - "numEntities":1, - "pageSize":10, - "metadata":{ - "urns":[ - "urn:li:chart:(looker,baz1)" - ], - "matches":[ - { - "matchedFields":[ - { - "name":"tool", - "value":"looker" - } - ] - } - ], - "searchResultMetadatas":[ - { - "name":"tool", - "aggregations":{ - "looker":1 - } - } - ] - }, - "from":0, - "entities":[ - "urn:li:chart:(looker,baz1)" - ] - } -} -``` - -###### Exact Match Search - -You can use colon search for exact match searching on particular @Searchable fields of an Entity. - -###### Example: Find assets by Tag - -For example, to fetch all Datasets having a particular tag (Engineering), we can use the following query: - -``` -curl -X POST 'http://localhost:8080/entities?action=search' \ ---data '{ - "input": "tags:Engineering", - "entity": "dataset", - "start": 0, - "count": 10 -}' - -{ - "value":{ - "numEntities":1, - "pageSize":10, - "metadata":{ - "urns":[ - "urn:li:dataset:(urn:li:dataPlatform:foo,bar,PROD)" - ], - "matches":[ - { - "matchedFields":[ - { - "name":"tags", - "value":"urn:li:tag:Engineering" - } - ] - } - ], - "searchResultMetadatas":[ - { - "name":"platform", - "aggregations":{ - "foo":1 - } - }, - { - "name":"origin", - "aggregations":{ - "PROD":1 - } - } - ] - }, - "from":0, - "entities":[ - "urn:li:dataset:(urn:li:dataPlatform:foo,bar,PROD)" - ] - } -} -``` - -###### Filtering - -In addition to performing full-text search, you can also filter explicitly against fields marked as @Searchable in the corresponding aspect PDLs. - -For example, to perform filtering for a chart with title "Baz Chart 1", you could issue the following query: - -``` -curl -X POST 'http://localhost:8080/entities?action=search' \ ---data '{ - "input": "looker", - "entity": "chart", - "start": 0, - "count": 10, - "filter": { - "or": [{ - "and": [ - { - "field": "title", - "value": "Baz Chart 1", - "condition": "EQUAL" - } - ] - }] - } -}' - -{ - "value":{ - "numEntities":1, - "pageSize":10, - "metadata":{ - "urns":[ - "urn:li:chart:(looker,baz1)" - ], - "matches":[ - { - "matchedFields":[ - { - "name":"tool", - "value":"looker" - } - ] - } - ], - "searchResultMetadatas":[ - { - "name":"tool", - "aggregations":{ - "looker":1 - } - } - ] - }, - "from":0, - "entities":[ - "urn:li:chart:(looker,baz1)" - ] - } -} -``` - -where valid conditions include - - CONTAIN - - END_WITH - - EQUAL - - GREATER_THAN - - GREATER_THAN_OR_EQUAL_TO - - LESS_THAN - - LESS_THAN_OR_EQUAL_TO - - START_WITH - -*Note that the search API only includes data corresponding to the latest snapshots of a particular Entity. 
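In Python, the same filtered query is just a matter of attaching the `filter` block to the search body — a minimal sketch, again assuming `requests` and a GMS instance at `http://localhost:8080`:

```
import requests

GMS = "http://localhost:8080"  # assumed local GMS address

# Find charts whose title is exactly "Baz Chart 1", mirroring the curl example above.
body = {
    "input": "looker",
    "entity": "chart",
    "start": 0,
    "count": 10,
    "filter": {
        "or": [
            {"and": [
                {"field": "title", "value": "Baz Chart 1", "condition": "EQUAL"}
            ]}
        ]
    },
}
response = requests.post(f"{GMS}/entities?action=search", json=body)
response.raise_for_status()
print(response.json()["value"]["entities"])  # e.g. ["urn:li:chart:(looker,baz1)"]
```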
- - -##### Autocomplete against fields of an entity - -To autocomplete a query for a particular entity type, you can use a query of the following form: - -``` -curl -X POST 'http://localhost:8080/entities?action=autocomplete' \ ---data '{ - "query": "", - "entity": "", - "limit": 10 -}' -``` - -For example, to autocomplete a query against all Dataset entities, you could issue the following: - -``` -curl -X POST 'http://localhost:8080/entities?action=autocomplete' \ ---data '{ - "query": "Baz Ch", - "entity": "chart", - "start": 0, - "limit": 10 -}' - -{ - "value":{ - "suggestions":[ - "Baz Chart 1" - ], - "query":"Baz Ch" - } -} -``` - -Note that you can also provide a `Filter` to the autocomplete endpoint: - -``` -curl -X POST 'http://localhost:8080/entities?action=autocomplete' \ ---data '{ - "query": "Baz C", - "entity": "chart", - "start": 0, - "limit": 10, - "filter": { - "or": [{ - "and": [ - { - "field": "tool", - "value": "looker", - "condition": "EQUAL" - } - ] - }] - } -}' - -{ - "value":{ - "suggestions":[ - "Baz Chart 1" - ], - "query":"Baz Ch" - } -} -``` - -*Note that the autocomplete API only includes data corresponding to the latest snapshots of a particular Entity. - - -##### Get a Versioned Aspect - -In addition to fetching the set of latest Snapshot aspects for an entity, we also support doing a point lookup of an entity at a particular version. - -To do so, you can use the following query template: - -``` -curl 'http://localhost:8080/aspects/?aspect=&version= -``` - -Which will return a VersionedAspect, which is a record containing a version and an aspect inside a Rest.li Union, wherein the fully-qualified record name of the -aspect is the key for the union. - -For example, to fetch the latest version of a Dataset's "schemaMetadata" aspect, you could issue the following query: - -``` -curl 'http://localhost:8080/aspects/urn%3Ali%3Adataset%3A(urn%3Ali%3AdataPlatform%3Afoo%2Cbar%2CPROD)?aspect=schemaMetadata&version=0' - -{ - "version":0, - "aspect":{ - "com.linkedin.schema.SchemaMetadata":{ - "created":{ - "actor":"urn:li:corpuser:fbar", - "time":0 - }, - "platformSchema":{ - "com.linkedin.schema.KafkaSchema":{ - "documentSchema":"{\"type\":\"record\",\"name\":\"MetadataChangeEvent\",\"namespace\":\"com.linkedin.mxe\",\"doc\":\"Kafka event for proposing a metadata change for an entity.\",\"fields\":[{\"name\":\"auditHeader\",\"type\":{\"type\":\"record\",\"name\":\"KafkaAuditHeader\",\"namespace\":\"com.linkedin.avro2pegasus.events\",\"doc\":\"Header\"}}]}" - } - }, - "lastModified":{ - "actor":"urn:li:corpuser:fbar", - "time":0 - }, - "schemaName":"FooEvent", - "fields":[ - { - "fieldPath":"foo", - "description":"Bar", - "type":{ - "type":{ - "com.linkedin.schema.StringType":{ - - } - } - }, - "nativeDataType":"string" - } - ], - "version":0, - "hash":"", - "platform":"urn:li:dataPlatform:foo" - } - } -} -``` - -Keep in mind that versions increase monotonically *after* version 0, which represents the latest. - -Note that this API will soon be deprecated and replaced by the V2 Aspect API, discussed below. - -##### Get a range of Versioned Aspects - -*Coming Soon*! - -##### Get a range of Timeseries Aspects - -With the introduction of Timeseries Aspects, we've introduced a new API for fetching a series of aspects falling into a particular time range. For this, you'll -use the `/aspects` endpoint. The V2 APIs are unique in that they return a new type of payload: an "Enveloped Aspect". This is essentially a serialized aspect along with -some system metadata. 
The serialized aspect can be in any form, though we currently default to escaped Rest.li-compatible JSON. - -Callers of the V2 Aspect APIs will be expected to deserialize the aspect payload in the way they see fit. For example, they may bind the deserialized JSON object -into a strongly typed Rest.li RecordTemplate class (which is what datahub-frontend does). The benefit of doing it this way is thaet we remove the necessity to -use Rest.li Unions to represent an object which can take on multiple payload forms. It also makes adding and removing aspects from the model easier, a process -which could theoretically be done at runtime as opposed to at deploy time. - -To fetch a set of Timeseries Aspects that fall into a particular time range, you can use the following query template: - -``` -curl -X POST 'http://localhost:8080/aspects?action=getTimeseriesAspectValues' \ ---data '{ - "urn": "", - "entity": "", - "aspect": "", - "startTimeMillis": "", - "endTimeMillis": "" -}' -``` - -For example, to fetch "datasetProfile" timeseries aspects for a dataset with urn `urn:li:dataset:(urn:li:dataPlatform:foo,barUp,PROD)` -that were reported after July 26, 2021 and before July 28, 2021, you could issue the following query: - -``` -curl -X POST 'http://localhost:8080/aspects?action=getTimeseriesAspectValues' \ ---data '{ - "urn": "urn:li:dataset:(urn:li:dataPlatform:redshift,global_dev.larxynx_carcinoma_data_2020,PROD)", - "entity": "dataset", - "aspect": "datasetProfile", - "startTimeMillis": 1625122800000, - "endTimeMillis": 1627455600000 -}' - -{ - "value":{ - "limit":10000, - "aspectName":"datasetProfile", - "endTimeMillis":1627455600000, - "startTimeMillis":1625122800000, - "entityName":"dataset", - "values":[ - { - "aspect":{ - "value":"{\"timestampMillis\":1626912000000,\"fieldProfiles\":[{\"uniqueProportion\":1.0,\"sampleValues\":[\"123MMKK12\",\"13KDFMKML\",\"123NNJJJL\"],\"fieldPath\":\"id\",\"nullCount\":0,\"nullProportion\":0.0,\"uniqueCount\":3742},{\"uniqueProportion\":1.0,\"min\":\"1524406400000\",\"max\":\"1624406400000\",\"sampleValues\":[\"1640023230002\",\"1640343012207\",\"16303412330117\"],\"mean\":\"1555406400000\",\"fieldPath\":\"date\",\"nullCount\":0,\"nullProportion\":0.0,\"uniqueCount\":3742},{\"uniqueProportion\":0.037,\"min\":\"21\",\"median\":\"68\",\"max\":\"92\",\"sampleValues\":[\"45\",\"65\",\"81\"],\"mean\":\"65\",\"distinctValueFrequencies\":[{\"value\":\"12\",\"frequency\":103},{\"value\":\"54\",\"frequency\":12}],\"fieldPath\":\"patient_age\",\"nullCount\":0,\"nullProportion\":0.0,\"uniqueCount\":79},{\"uniqueProportion\":0.00820873786407767,\"sampleValues\":[\"male\",\"female\"],\"fieldPath\":\"patient_gender\",\"nullCount\":120,\"nullProportion\":0.03,\"uniqueCount\":2}],\"rowCount\":3742,\"columnCount\":4}", - "contentType":"application/json" - } - }, - ] - } -} -``` - -You'll notice that in this API (V2), we return a generic serialized aspect string as opposed to an inlined Rest.li-serialized Snapshot Model. - -This is part of an initiative to move from MCE + MAE to MetadataChangeProposal and MetadataChangeLog. For more information, see [this doc](docs/advanced/mcp-mcl.md). - -##### Get Relationships (Edges) - -To get relationships between entities, you can use the `/relationships` API. Do do so, you must provide the following inputs: - -1. Urn of the source node -2. Direction of the edge (INCOMING, OUTGOING) -3. 
The name of the Relationship (This can be found in Aspect PDLs within the @Relationship annotation) - -For example, to get all entities owned by `urn:li:corpuser:fbar`, we could issue the following query: - -``` -curl 'http://localhost:8080/relationships?direction=INCOMING&urn=urn%3Ali%3Acorpuser%3Auser1&types=OwnedBy' -``` - -which will return a list of urns, representing entities on the other side of the relationship: - -``` -{ - "entities":[ - urn:li:dataset:(urn:li:dataPlatform:foo,barUp,PROD) - ] -} -``` - -## FAQ - -*1. How do I find the valid set of Entity names?* - -Entities are named inside of PDL schemas. Each entity will be annotated with the @Entity annotation, which will include a "name" field inside. -This represents the "common name" for the entity which can be used in browsing, searching, and more. By default, DataHub ships with the following entities: - -By convention, all entity PDLs live under `metadata-models/src/main/pegasus/com/linkedin/metadata/snapshot` - -*2. How do I find the valid set of Aspect names?* - -Aspects are named inside of PDL schemas. Each aspect will be annotated with the @Aspect annotation, which will include a "name" field inside. -This represents the "common name" for the entity which can be used in browsing, searching, and more. - -By convention, all entity PDLs live under `metadata-models/src/main/pegasus/com/linkedin/metadata/common` or `metadata-models/src/main/pegasus/com/linkedin/metadata/`. For example, -the dataset-specific aspects are located under `metadata-models/src/main/pegasus/com/linkedin/metadata/dataset`. - -*3. How do I find the valid set of Relationship names?* - -All relationships are defined on foreign-key fields inside Aspect PDLs. They are reflected by fields bearing the @Relationship annotation. Inside this annotation -is a "name" field that defines the standardized name of the Relationship to be used when querying. - -By convention, all entity PDLs live under `metadata-models/src/main/pegasus/com/linkedin/metadata/common` or `metadata-models/src/main/pegasus/com/linkedin/metadata/`. For example, -the dataset-specific aspects are located under `metadata-models/src/main/pegasus/com/linkedin/metadata/dataset`. - diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/utils/self_attention_block.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/utils/self_attention_block.py deleted file mode 100644 index 440c7b73ee4706fde555595926d63a18d7574acc..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/utils/self_attention_block.py +++ /dev/null @@ -1,159 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import ConvModule, constant_init -from torch import nn as nn -from torch.nn import functional as F - - -class SelfAttentionBlock(nn.Module): - """General self-attention block/non-local block. - - Please refer to https://arxiv.org/abs/1706.03762 for details about key, - query and value. - - Args: - key_in_channels (int): Input channels of key feature. - query_in_channels (int): Input channels of query feature. - channels (int): Output channels of key/query transform. - out_channels (int): Output channels. - share_key_query (bool): Whether share projection weight between key - and query projection. - query_downsample (nn.Module): Query downsample module. - key_downsample (nn.Module): Key downsample module. - key_query_num_convs (int): Number of convs for key/query projection. 
- value_num_convs (int): Number of convs for value projection. - matmul_norm (bool): Whether normalize attention map with sqrt of - channels - with_out (bool): Whether use out projection. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict|None): Config of activation layers. - """ - - def __init__(self, key_in_channels, query_in_channels, channels, - out_channels, share_key_query, query_downsample, - key_downsample, key_query_num_convs, value_out_num_convs, - key_query_norm, value_out_norm, matmul_norm, with_out, - conv_cfg, norm_cfg, act_cfg): - super(SelfAttentionBlock, self).__init__() - if share_key_query: - assert key_in_channels == query_in_channels - self.key_in_channels = key_in_channels - self.query_in_channels = query_in_channels - self.out_channels = out_channels - self.channels = channels - self.share_key_query = share_key_query - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.key_project = self.build_project( - key_in_channels, - channels, - num_convs=key_query_num_convs, - use_conv_module=key_query_norm, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - if share_key_query: - self.query_project = self.key_project - else: - self.query_project = self.build_project( - query_in_channels, - channels, - num_convs=key_query_num_convs, - use_conv_module=key_query_norm, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.value_project = self.build_project( - key_in_channels, - channels if with_out else out_channels, - num_convs=value_out_num_convs, - use_conv_module=value_out_norm, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - if with_out: - self.out_project = self.build_project( - channels, - out_channels, - num_convs=value_out_num_convs, - use_conv_module=value_out_norm, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - else: - self.out_project = None - - self.query_downsample = query_downsample - self.key_downsample = key_downsample - self.matmul_norm = matmul_norm - - self.init_weights() - - def init_weights(self): - """Initialize weight of later layer.""" - if self.out_project is not None: - if not isinstance(self.out_project, ConvModule): - constant_init(self.out_project, 0) - - def build_project(self, in_channels, channels, num_convs, use_conv_module, - conv_cfg, norm_cfg, act_cfg): - """Build projection layer for key/query/value/out.""" - if use_conv_module: - convs = [ - ConvModule( - in_channels, - channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - ] - for _ in range(num_convs - 1): - convs.append( - ConvModule( - channels, - channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - else: - convs = [nn.Conv2d(in_channels, channels, 1)] - for _ in range(num_convs - 1): - convs.append(nn.Conv2d(channels, channels, 1)) - if len(convs) > 1: - convs = nn.Sequential(*convs) - else: - convs = convs[0] - return convs - - def forward(self, query_feats, key_feats): - """Forward function.""" - batch_size = query_feats.size(0) - query = self.query_project(query_feats) - if self.query_downsample is not None: - query = self.query_downsample(query) - query = query.reshape(*query.shape[:2], -1) - query = query.permute(0, 2, 1).contiguous() - - key = self.key_project(key_feats) - value = self.value_project(key_feats) - if self.key_downsample is not None: - key = self.key_downsample(key) - value = self.key_downsample(value) - key = key.reshape(*key.shape[:2], -1) - value = 
value.reshape(*value.shape[:2], -1) - value = value.permute(0, 2, 1).contiguous() - - sim_map = torch.matmul(query, key) - if self.matmul_norm: - sim_map = (self.channels**-.5) * sim_map - sim_map = F.softmax(sim_map, dim=-1) - - context = torch.matmul(sim_map, value) - context = context.permute(0, 2, 1).contiguous() - context = context.reshape(batch_size, -1, *query_feats.shape[2:]) - if self.out_project is not None: - context = self.out_project(context) - return context diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/border_align.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/border_align.py deleted file mode 100644 index ff305be328e9b0a15e1bbb5e6b41beb940f55c81..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/border_align.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# modified from -# https://github.com/Megvii-BaseDetection/cvpods/blob/master/cvpods/layers/border_align.py - -import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['border_align_forward', 'border_align_backward']) - - -class BorderAlignFunction(Function): - - @staticmethod - def symbolic(g, input, boxes, pool_size): - return g.op( - 'mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size) - - @staticmethod - def forward(ctx, input, boxes, pool_size): - ctx.pool_size = pool_size - ctx.input_shape = input.size() - - assert boxes.ndim == 3, 'boxes must be with shape [B, H*W, 4]' - assert boxes.size(2) == 4, \ - 'the last dimension of boxes must be (x1, y1, x2, y2)' - assert input.size(1) % 4 == 0, \ - 'the channel for input feature must be divisible by factor 4' - - # [B, C//4, H*W, 4] - output_shape = (input.size(0), input.size(1) // 4, boxes.size(1), 4) - output = input.new_zeros(output_shape) - # `argmax_idx` only used for backward - argmax_idx = input.new_zeros(output_shape).to(torch.int) - - ext_module.border_align_forward( - input, boxes, output, argmax_idx, pool_size=ctx.pool_size) - - ctx.save_for_backward(boxes, argmax_idx) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - boxes, argmax_idx = ctx.saved_tensors - grad_input = grad_output.new_zeros(ctx.input_shape) - # complex head architecture may cause grad_output uncontiguous - grad_output = grad_output.contiguous() - ext_module.border_align_backward( - grad_output, - boxes, - argmax_idx, - grad_input, - pool_size=ctx.pool_size) - return grad_input, None, None - - -border_align = BorderAlignFunction.apply - - -class BorderAlign(nn.Module): - r"""Border align pooling layer. - - Applies border_align over the input feature based on predicted bboxes. - The details were described in the paper - `BorderDet: Border Feature for Dense Object Detection - `_. - - For each border line (e.g. top, left, bottom or right) of each box, - border_align does the following: - 1. uniformly samples `pool_size`+1 positions on this line, involving \ - the start and end points. - 2. the corresponding features on these points are computed by \ - bilinear interpolation. - 3. max pooling over all the `pool_size`+1 positions are used for \ - computing pooled feature. - - Args: - pool_size (int): number of positions sampled over the boxes' borders - (e.g. top, bottom, left, right). 
- - """ - - def __init__(self, pool_size): - super(BorderAlign, self).__init__() - self.pool_size = pool_size - - def forward(self, input, boxes): - """ - Args: - input: Features with shape [N,4C,H,W]. Channels ranged in [0,C), - [C,2C), [2C,3C), [3C,4C) represent the top, left, bottom, - right features respectively. - boxes: Boxes with shape [N,H*W,4]. Coordinate format (x1,y1,x2,y2). - - Returns: - Tensor: Pooled features with shape [N,C,H*W,4]. The order is - (top,left,bottom,right) for the last dimension. - """ - return border_align(input, boxes, self.pool_size) - - def __repr__(self): - s = self.__class__.__name__ - s += f'(pool_size={self.pool_size})' - return s diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/model/codecs/gltf.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/model/codecs/gltf.py deleted file mode 100644 index 71e6a79e0d7627965bae9412ef4f940470a8d381..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/model/codecs/gltf.py +++ /dev/null @@ -1,248 +0,0 @@ -import json -import struct - -import pyglet - -from pyglet.gl import GL_BYTE, GL_UNSIGNED_BYTE, GL_SHORT, GL_UNSIGNED_SHORT, GL_FLOAT -from pyglet.gl import GL_UNSIGNED_INT, GL_ELEMENT_ARRAY_BUFFER, GL_ARRAY_BUFFER, GL_TRIANGLES - -from .. import Model, Material, MaterialGroup -from . import ModelDecodeException, ModelDecoder - - -# pyglet.graphics types -_pyglet_types = { - GL_BYTE: 'b', - GL_UNSIGNED_BYTE: 'B', - GL_SHORT: 's', - GL_UNSIGNED_SHORT: 'S', - GL_UNSIGNED_INT: 'I', - GL_FLOAT: 'f', -} - -# struct module types -_struct_types = { - GL_BYTE: 'b', - GL_UNSIGNED_BYTE: 'B', - GL_SHORT: 'h', - GL_UNSIGNED_SHORT: 'H', - GL_UNSIGNED_INT: 'I', - GL_FLOAT: 'f', -} - -# OpenGL type sizes -_component_sizes = { - GL_BYTE: 1, - GL_UNSIGNED_BYTE: 1, - GL_SHORT: 2, - GL_UNSIGNED_SHORT: 2, - GL_UNSIGNED_INT: 4, - GL_FLOAT: 4 -} - -_accessor_type_sizes = { - "SCALAR": 1, - "VEC2": 2, - "VEC3": 3, - "VEC4": 4, - "MAT2": 4, - "MAT3": 9, - "MAT4": 16 -} - -_targets = { - GL_ELEMENT_ARRAY_BUFFER: "element_array", - GL_ARRAY_BUFFER: "array", -} - -# GLTF to pyglet shorthand types: -_attributes = { - 'POSITION': 'v', - 'NORMAL': 'n', - 'TANGENT': None, - 'TEXCOORD_0': '0t', - 'TEXCOORD_1': '1t', - 'COLOR_0': 'c', - 'JOINTS_0': None, - 'WEIGHTS_0': None -} - - -class Buffer: - # TODO: support GLB format - # TODO: support data uris - def __init__(self, length, uri): - self._length = length - self._uri = uri - - def read(self, offset, length): - file = pyglet.resource.file(self._uri, 'rb') - file.seek(offset) - data = file.read(length) - file.close() - return data - - -class BufferView: - def __init__(self, buffer, offset, length, target, stride): - self.buffer = buffer - self.offset = offset - self.length = length - self.target = target - self.stride = stride - - -class Accessor: - # TODO: support sparse accessors - def __init__(self, buffer_view, offset, comp_type, count, - maximum, minimum, accessor_type, sparse): - self.buffer_view = buffer_view - self.offset = offset - self.component_type = comp_type - self.count = count - self.maximum = maximum - self.minimum = minimum - self.type = accessor_type - self.sparse = sparse - self.size = _component_sizes[comp_type] * _accessor_type_sizes[accessor_type] - - def read(self): - offset = self.offset + self.buffer_view.offset - length = self.size * self.count - stride = self.buffer_view.stride or 1 - # TODO: 
handle stride - data = self.buffer_view.buffer.read(offset, length) - return data - - -def parse_gltf_file(file, filename, batch): - - if file is None: - file = pyglet.resource.file(filename, 'r') - elif file.mode != 'r': - file.close() - file = pyglet.resource.file(filename, 'r') - - try: - data = json.load(file) - except json.JSONDecodeError: - raise ModelDecodeException('Json error. Does not appear to be a valid glTF file.') - finally: - file.close() - - if 'asset' not in data: - raise ModelDecodeException('Not a valid glTF file. Asset property not found.') - else: - if float(data['asset']['version']) < 2.0: - raise ModelDecodeException('Only glTF 2.0+ models are supported') - - buffers = dict() - buffer_views = dict() - accessors = dict() - materials = dict() - - for i, item in enumerate(data.get('buffers', [])): - buffers[i] = Buffer(item['byteLength'], item['uri']) - - for i, item in enumerate(data.get('bufferViews', [])): - buffer_index = item['buffer'] - buffer = buffers[buffer_index] - offset = item.get('byteOffset', 0) - length = item.get('byteLength') - target = item.get('target') - stride = item.get('byteStride', 1) - buffer_views[i] = BufferView(buffer, offset, length, target, stride) - - for i, item in enumerate(data.get('accessors', [])): - buf_view_index = item.get('bufferView') - buf_view = buffer_views[buf_view_index] - offset = item.get('byteOffset', 0) - comp_type = item.get('componentType') - count = item.get('count') - maxi = item.get('max') - mini = item.get('min') - acc_type = item.get('type') - sparse = item.get('sparse', None) - accessors[i] = Accessor(buf_view, offset, comp_type, count, maxi, mini, acc_type, sparse) - - vertex_lists = [] - - for mesh_data in data.get('meshes'): - - for primitive in mesh_data.get('primitives', []): - indices = None - attribute_list = [] - count = 0 - - for attribute_type, i in primitive['attributes'].items(): - accessor = accessors[i] - attrib = _attributes[attribute_type] - if not attrib: - # TODO: Add support for these attribute types to pyglet - continue - attrib_size = _accessor_type_sizes[accessor.type] - pyglet_type = _pyglet_types[accessor.component_type] - pyglet_fmt = "{0}{1}{2}".format(attrib, attrib_size, pyglet_type) - - count = accessor.count - struct_fmt = str(count * attrib_size) + _struct_types[accessor.component_type] - array = struct.unpack('<' + struct_fmt, accessor.read()) - - attribute_list.append((pyglet_fmt, array)) - - if 'indices' in primitive: - indices_index = primitive.get('indices') - accessor = accessors[indices_index] - attrib_size = _accessor_type_sizes[accessor.type] - fmt = str(accessor.count * attrib_size) + _struct_types[accessor.component_type] - indices = struct.unpack('<' + fmt, accessor.read()) - - # if 'material' in primitive: - # material_index = primitive.get('material') - # color = materials[material_index] - # attribute_list.append(('c4f', color * count)) - - diffuse = [1.0, 1.0, 1.0] - ambient = [1.0, 1.0, 1.0] - specular = [1.0, 1.0, 1.0] - emission = [0.0, 0.0, 0.0] - shininess = 100.0 - opacity = 1.0 - material = Material("Default", diffuse, ambient, specular, emission, shininess, opacity) - group = MaterialGroup(material=material) - - if indices: - vlist = batch.add_indexed(count, GL_TRIANGLES, group, indices, *attribute_list) - else: - vlist = batch.add(count, GL_TRIANGLES, group, *attribute_list) - - vertex_lists.append(vlist) - - return vertex_lists - - -################################################### -# Decoder definitions start here: 
-################################################### - -class GLTFModelDecoder(ModelDecoder): - def get_file_extensions(self): - return ['.gltf'] - - def decode(self, file, filename, batch): - - if not batch: - batch = pyglet.graphics.Batch() - - vertex_lists = parse_gltf_file(file=file, filename=filename, batch=batch) - textures = {} - - return Model(vertex_lists, textures, batch=batch) - - -def get_decoders(): - return [GLTFModelDecoder()] - - -def get_encoders(): - return [] diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/win32/__init__.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/win32/__init__.py deleted file mode 100644 index aa7145672e479cd60a94f18761b91882b293732f..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/win32/__init__.py +++ /dev/null @@ -1,1228 +0,0 @@ -from ctypes import * -from functools import lru_cache -import unicodedata - -from pyglet import compat_platform - -if compat_platform not in ('cygwin', 'win32'): - raise ImportError('Not a win32 platform.') - -import pyglet -from pyglet.window import BaseWindow, WindowException, MouseCursor -from pyglet.window import DefaultMouseCursor, _PlatformEventHandler, _ViewEventHandler -from pyglet.event import EventDispatcher -from pyglet.window import key, mouse - -from pyglet.canvas.win32 import Win32Canvas - -from pyglet.libs.win32 import _user32, _kernel32, _gdi32, _dwmapi, _shell32 -from pyglet.libs.win32.constants import * -from pyglet.libs.win32.winkey import * -from pyglet.libs.win32.types import * - -# symbol,ctrl -> motion mapping -_motion_map = { - (key.UP, False): key.MOTION_UP, - (key.RIGHT, False): key.MOTION_RIGHT, - (key.DOWN, False): key.MOTION_DOWN, - (key.LEFT, False): key.MOTION_LEFT, - (key.RIGHT, True): key.MOTION_NEXT_WORD, - (key.LEFT, True): key.MOTION_PREVIOUS_WORD, - (key.HOME, False): key.MOTION_BEGINNING_OF_LINE, - (key.END, False): key.MOTION_END_OF_LINE, - (key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE, - (key.PAGEDOWN, False): key.MOTION_NEXT_PAGE, - (key.HOME, True): key.MOTION_BEGINNING_OF_FILE, - (key.END, True): key.MOTION_END_OF_FILE, - (key.BACKSPACE, False): key.MOTION_BACKSPACE, - (key.DELETE, False): key.MOTION_DELETE, - (key.C, True): key.MOTION_COPY, - (key.V, True): key.MOTION_PASTE -} - - -class Win32MouseCursor(MouseCursor): - gl_drawable = False - hw_drawable = True - - def __init__(self, cursor): - self.cursor = cursor - - -# This is global state, we have to be careful not to set the same state twice, -# which will throw off the ShowCursor counter. -_win32_cursor_visible = True - -Win32EventHandler = _PlatformEventHandler -ViewEventHandler = _ViewEventHandler - - -class Win32Window(BaseWindow): - _window_class = None - _hwnd = None - _dc = None - _wgl_context = None - _tracking = False - _hidden = False - _has_focus = False - - _exclusive_keyboard = False - _exclusive_keyboard_focus = True - _exclusive_mouse = False - _exclusive_mouse_focus = True - _exclusive_mouse_screen = None - _exclusive_mouse_lpos = None - _exclusive_mouse_buttons = 0 - _mouse_platform_visible = True - _pending_click = False - _in_title_bar = False - - _keyboard_state = {0x02A: False, 0x036: False} # For shift keys. 
- - _ws_style = 0 - _ex_ws_style = 0 - _minimum_size = None - _maximum_size = None - - def __init__(self, *args, **kwargs): - # Bind event handlers - self._event_handlers = {} - self._view_event_handlers = {} - for func_name in self._platform_event_names: - if not hasattr(self, func_name): - continue - func = getattr(self, func_name) - for message in func._platform_event_data: - if hasattr(func, '_view'): - self._view_event_handlers[message] = func - else: - self._event_handlers[message] = func - - self._always_dwm = sys.getwindowsversion() >= (6, 2) - self._interval = 0 - - super(Win32Window, self).__init__(*args, **kwargs) - - def _recreate(self, changes): - if 'context' in changes: - self._wgl_context = None - - self._create() - - def _create(self): - # Ensure style is set before determining width/height. - if self._fullscreen: - self._ws_style = WS_POPUP - self._ex_ws_style = 0 # WS_EX_TOPMOST - else: - styles = { - self.WINDOW_STYLE_DEFAULT: (WS_OVERLAPPEDWINDOW, 0), - self.WINDOW_STYLE_DIALOG: (WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU, - WS_EX_DLGMODALFRAME), - self.WINDOW_STYLE_TOOL: (WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU, - WS_EX_TOOLWINDOW), - self.WINDOW_STYLE_BORDERLESS: (WS_POPUP, 0), - self.WINDOW_STYLE_TRANSPARENT: (WS_OVERLAPPEDWINDOW, - WS_EX_LAYERED), - self.WINDOW_STYLE_OVERLAY: (WS_POPUP, - WS_EX_LAYERED | WS_EX_TRANSPARENT) - } - self._ws_style, self._ex_ws_style = styles[self._style] - - if self._resizable and not self._fullscreen: - self._ws_style |= WS_THICKFRAME - else: - self._ws_style &= ~(WS_THICKFRAME | WS_MAXIMIZEBOX) - - if self._fullscreen: - width = self.screen.width - height = self.screen.height - else: - width, height = \ - self._client_to_window_size(self._width, self._height) - - if not self._window_class: - module = _kernel32.GetModuleHandleW(None) - white = _gdi32.GetStockObject(WHITE_BRUSH) - black = _gdi32.GetStockObject(BLACK_BRUSH) - self._window_class = WNDCLASS() - self._window_class.lpszClassName = u'GenericAppClass%d' % id(self) - self._window_class.lpfnWndProc = WNDPROC( - self._get_window_proc(self._event_handlers)) - self._window_class.style = CS_VREDRAW | CS_HREDRAW | CS_OWNDC - self._window_class.hInstance = 0 - self._window_class.hIcon = _user32.LoadImageW(module, MAKEINTRESOURCE(1), IMAGE_ICON, - 0, 0, LR_DEFAULTSIZE | LR_SHARED) - self._window_class.hbrBackground = black - self._window_class.lpszMenuName = None - self._window_class.cbClsExtra = 0 - self._window_class.cbWndExtra = 0 - _user32.RegisterClassW(byref(self._window_class)) - - self._view_window_class = WNDCLASS() - self._view_window_class.lpszClassName = \ - u'GenericViewClass%d' % id(self) - self._view_window_class.lpfnWndProc = WNDPROC( - self._get_window_proc(self._view_event_handlers)) - self._view_window_class.style = 0 - self._view_window_class.hInstance = 0 - self._view_window_class.hIcon = 0 - self._view_window_class.hbrBackground = white - self._view_window_class.lpszMenuName = None - self._view_window_class.cbClsExtra = 0 - self._view_window_class.cbWndExtra = 0 - _user32.RegisterClassW(byref(self._view_window_class)) - - if not self._hwnd: - self._hwnd = _user32.CreateWindowExW( - self._ex_ws_style, - self._window_class.lpszClassName, - u'', - self._ws_style, - CW_USEDEFAULT, - CW_USEDEFAULT, - width, - height, - 0, - 0, - self._window_class.hInstance, - 0) - - # View Hwnd is for the client area so certain events (mouse events) don't trigger outside of area. 
- self._view_hwnd = _user32.CreateWindowExW( - 0, - self._view_window_class.lpszClassName, - u'', - WS_CHILD | WS_VISIBLE, - 0, 0, 0, 0, - self._hwnd, - 0, - self._view_window_class.hInstance, - 0) - - self._dc = _user32.GetDC(self._view_hwnd) - - # Only allow files being dropped if specified. - if self._file_drops: - # Allows UAC to not block the drop files request if low permissions. All 3 must be set. - if WINDOWS_7_OR_GREATER: - _user32.ChangeWindowMessageFilterEx(self._hwnd, WM_DROPFILES, MSGFLT_ALLOW, None) - _user32.ChangeWindowMessageFilterEx(self._hwnd, WM_COPYDATA, MSGFLT_ALLOW, None) - _user32.ChangeWindowMessageFilterEx(self._hwnd, WM_COPYGLOBALDATA, MSGFLT_ALLOW, None) - - _shell32.DragAcceptFiles(self._hwnd, True) - - # Set the raw keyboard to handle shift state. This is required as legacy events cannot handle shift states - # when both keys are used together. View Hwnd as none changes focus to follow keyboard. - raw_keyboard = RAWINPUTDEVICE(0x01, 0x06, 0, None) - if not _user32.RegisterRawInputDevices( - byref(raw_keyboard), 1, sizeof(RAWINPUTDEVICE)): - print("Warning: Failed to unregister raw input keyboard.") - else: - # Window already exists, update it with new style - - # We need to hide window here, otherwise Windows forgets - # to redraw the whole screen after leaving fullscreen. - _user32.ShowWindow(self._hwnd, SW_HIDE) - - _user32.SetWindowLongW(self._hwnd, - GWL_STYLE, - self._ws_style) - _user32.SetWindowLongW(self._hwnd, - GWL_EXSTYLE, - self._ex_ws_style) - - # Position and size window - if self._fullscreen: - hwnd_after = HWND_TOPMOST if self.style == "overlay" else HWND_NOTOPMOST - _user32.SetWindowPos(self._hwnd, hwnd_after, - self._screen.x, self._screen.y, width, height, SWP_FRAMECHANGED) - elif False: # TODO location not in pyglet API - x, y = self._client_to_window_pos(*factory.get_location()) - _user32.SetWindowPos(self._hwnd, HWND_NOTOPMOST, - x, y, width, height, SWP_FRAMECHANGED) - elif self.style == 'transparent' or self.style == "overlay": - _user32.SetLayeredWindowAttributes(self._hwnd, 0, 254, LWA_ALPHA) - if self.style == "overlay": - _user32.SetWindowPos(self._hwnd, HWND_TOPMOST, 0, - 0, width, height, SWP_NOMOVE | SWP_NOSIZE) - else: - _user32.SetWindowPos(self._hwnd, HWND_NOTOPMOST, - 0, 0, width, height, SWP_NOMOVE | SWP_FRAMECHANGED) - - self._update_view_location(self._width, self._height) - - # Context must be created after window is created. 
- if not self._wgl_context: - self.canvas = Win32Canvas(self.display, self._view_hwnd, self._dc) - self.context.attach(self.canvas) - self._wgl_context = self.context._context - - self.switch_to() - - self.set_caption(self._caption) - self.set_vsync(self._vsync) - - if self._visible: - self.set_visible() - # Might need resize event if going from fullscreen to fullscreen - self.dispatch_event('on_resize', self._width, self._height) - self.dispatch_event('on_expose') - - def _update_view_location(self, width, height): - if self._fullscreen: - x = (self.screen.width - width) // 2 - y = (self.screen.height - height) // 2 - else: - x = y = 0 - _user32.SetWindowPos(self._view_hwnd, 0, - x, y, width, height, SWP_NOZORDER | SWP_NOOWNERZORDER) - - def close(self): - if not self._hwnd: - super(Win32Window, self).close() - return - - self.set_mouse_platform_visible(True) - - _user32.DestroyWindow(self._hwnd) - _user32.UnregisterClassW(self._view_window_class.lpszClassName, 0) - _user32.UnregisterClassW(self._window_class.lpszClassName, 0) - - self._window_class = None - self._view_window_class = None - self._view_event_handlers.clear() - self._event_handlers.clear() - self._hwnd = None - self._dc = None - self._wgl_context = None - super(Win32Window, self).close() - - def _dwm_composition_enabled(self): - """ Checks if Windows DWM is enabled (Windows Vista+) - Note: Always on for Windows 8+ - """ - is_enabled = c_int() - _dwmapi.DwmIsCompositionEnabled(byref(is_enabled)) - return is_enabled.value - - def _get_vsync(self): - return bool(self._interval) - - vsync = property(_get_vsync) # overrides BaseWindow property - - def set_vsync(self, vsync): - if pyglet.options['vsync'] is not None: - vsync = pyglet.options['vsync'] - - self._interval = vsync - - if not self._fullscreen: - # Disable interval if composition is enabled to avoid conflict with DWM. 
- if self._always_dwm or self._dwm_composition_enabled(): - vsync = 0 - - self.context.set_vsync(vsync) - - def switch_to(self): - self.context.set_current() - - def update_transparency(self): - region = _gdi32.CreateRectRgn(0, 0, -1, -1) - bb = DWM_BLURBEHIND() - bb.dwFlags = DWM_BB_ENABLE | DWM_BB_BLURREGION - bb.hRgnBlur = region - bb.fEnable = True - - _dwmapi.DwmEnableBlurBehindWindow(self._hwnd, ctypes.byref(bb)) - _gdi32.DeleteObject(region) - - def flip(self): - self.draw_mouse_cursor() - - if not self._fullscreen: - if self._always_dwm or self._dwm_composition_enabled(): - if self._interval: - _dwmapi.DwmFlush() - - if self.style in ('overlay', 'transparent'): - self.update_transparency() - - self.context.flip() - - def set_location(self, x, y): - x, y = self._client_to_window_pos(x, y) - _user32.SetWindowPos(self._hwnd, 0, x, y, 0, 0, - (SWP_NOZORDER | - SWP_NOSIZE | - SWP_NOOWNERZORDER)) - - def get_location(self): - rect = RECT() - _user32.GetClientRect(self._hwnd, byref(rect)) - point = POINT() - point.x = rect.left - point.y = rect.top - _user32.ClientToScreen(self._hwnd, byref(point)) - return point.x, point.y - - def set_size(self, width, height): - super().set_size(width, height) - width, height = self._client_to_window_size(width, height) - _user32.SetWindowPos(self._hwnd, 0, 0, 0, width, height, - (SWP_NOZORDER | SWP_NOMOVE | SWP_NOOWNERZORDER)) - self.dispatch_event('on_resize', width, height) - - def get_size(self): - # rect = RECT() - # _user32.GetClientRect(self._hwnd, byref(rect)) - # return rect.right - rect.left, rect.bottom - rect.top - return self._width, self._height - - def set_minimum_size(self, width, height): - self._minimum_size = width, height - - def set_maximum_size(self, width, height): - self._maximum_size = width, height - - def activate(self): - _user32.SetForegroundWindow(self._hwnd) - - def set_visible(self, visible=True): - if visible: - insertAfter = HWND_TOP - _user32.SetWindowPos(self._hwnd, insertAfter, 0, 0, 0, 0, - SWP_NOMOVE | SWP_NOSIZE | SWP_SHOWWINDOW) - self.dispatch_event('on_resize', self._width, self._height) - self.activate() - self.dispatch_event('on_show') - else: - _user32.ShowWindow(self._hwnd, SW_HIDE) - self.dispatch_event('on_hide') - self._visible = visible - self.set_mouse_platform_visible() - - def minimize(self): - _user32.ShowWindow(self._hwnd, SW_MINIMIZE) - - def maximize(self): - _user32.ShowWindow(self._hwnd, SW_MAXIMIZE) - - def set_caption(self, caption): - self._caption = caption - _user32.SetWindowTextW(self._hwnd, c_wchar_p(caption)) - - def set_mouse_platform_visible(self, platform_visible=None): - if platform_visible is None: - platform_visible = (self._mouse_visible and - not self._exclusive_mouse and - (not self._mouse_cursor.gl_drawable or self._mouse_cursor.hw_drawable)) or \ - (not self._mouse_in_window or - not self._has_focus) - - if platform_visible and self._mouse_cursor.hw_drawable: - if isinstance(self._mouse_cursor, Win32MouseCursor): - cursor = self._mouse_cursor.cursor - elif isinstance(self._mouse_cursor, DefaultMouseCursor): - cursor = _user32.LoadCursorW(None, MAKEINTRESOURCE(IDC_ARROW)) - else: - cursor = self._create_cursor_from_image(self._mouse_cursor) - - _user32.SetClassLongPtrW(self._view_hwnd, GCL_HCURSOR, cursor) - _user32.SetCursor(cursor) - - if platform_visible == self._mouse_platform_visible: - return - - self._set_cursor_visibility(platform_visible) - - self._mouse_platform_visible = platform_visible - - def _set_cursor_visibility(self, platform_visible): - # Avoid calling 
ShowCursor with the current visibility (which would - # push the counter too far away from zero). - global _win32_cursor_visible - if _win32_cursor_visible != platform_visible: - _user32.ShowCursor(platform_visible) - _win32_cursor_visible = platform_visible - - def _update_clipped_cursor(self): - # Clip to client area, to prevent large mouse movements taking - # it outside the client area. - if self._in_title_bar or self._pending_click: - return - - rect = RECT() - _user32.GetClientRect(self._view_hwnd, byref(rect)) - _user32.MapWindowPoints(self._view_hwnd, HWND_DESKTOP, - byref(rect), 2) - - # For some reason borders can be off 1 pixel, allowing cursor into frame/minimize/exit buttons? - rect.top += 1 - rect.left += 1 - rect.right -= 1 - rect.bottom -= 1 - - _user32.ClipCursor(byref(rect)) - - def set_exclusive_mouse(self, exclusive=True): - if self._exclusive_mouse == exclusive and \ - self._exclusive_mouse_focus == self._has_focus: - return - - # Mouse: UsagePage = 1, Usage = 2 - raw_mouse = RAWINPUTDEVICE(0x01, 0x02, 0, None) - if not exclusive: - raw_mouse.dwFlags = RIDEV_REMOVE - raw_mouse.hwndTarget = None - - if not _user32.RegisterRawInputDevices( - byref(raw_mouse), 1, sizeof(RAWINPUTDEVICE)): - if exclusive: - raise WindowException("Cannot enter mouse exclusive mode.") - - self._exclusive_mouse_buttons = 0 - if exclusive and self._has_focus: - self._update_clipped_cursor() - else: - # Release clip - _user32.ClipCursor(None) - - self._exclusive_mouse = exclusive - self._exclusive_mouse_focus = self._has_focus - self.set_mouse_platform_visible(not exclusive) - - def set_mouse_position(self, x, y, absolute=False): - if not absolute: - rect = RECT() - _user32.GetClientRect(self._view_hwnd, byref(rect)) - _user32.MapWindowPoints(self._view_hwnd, HWND_DESKTOP, byref(rect), 2) - - x = x + rect.left - y = rect.top + (rect.bottom - rect.top) - y - - _user32.SetCursorPos(x, y) - - def set_exclusive_keyboard(self, exclusive=True): - if self._exclusive_keyboard == exclusive and \ - self._exclusive_keyboard_focus == self._has_focus: - return - - if exclusive and self._has_focus: - _user32.RegisterHotKey(self._hwnd, 0, WIN32_MOD_ALT, VK_TAB) - elif self._exclusive_keyboard and not exclusive: - _user32.UnregisterHotKey(self._hwnd, 0) - - self._exclusive_keyboard = exclusive - self._exclusive_keyboard_focus = self._has_focus - - def get_system_mouse_cursor(self, name): - if name == self.CURSOR_DEFAULT: - return DefaultMouseCursor() - - names = { - self.CURSOR_CROSSHAIR: IDC_CROSS, - self.CURSOR_HAND: IDC_HAND, - self.CURSOR_HELP: IDC_HELP, - self.CURSOR_NO: IDC_NO, - self.CURSOR_SIZE: IDC_SIZEALL, - self.CURSOR_SIZE_UP: IDC_SIZENS, - self.CURSOR_SIZE_UP_RIGHT: IDC_SIZENESW, - self.CURSOR_SIZE_RIGHT: IDC_SIZEWE, - self.CURSOR_SIZE_DOWN_RIGHT: IDC_SIZENWSE, - self.CURSOR_SIZE_DOWN: IDC_SIZENS, - self.CURSOR_SIZE_DOWN_LEFT: IDC_SIZENESW, - self.CURSOR_SIZE_LEFT: IDC_SIZEWE, - self.CURSOR_SIZE_UP_LEFT: IDC_SIZENWSE, - self.CURSOR_SIZE_UP_DOWN: IDC_SIZENS, - self.CURSOR_SIZE_LEFT_RIGHT: IDC_SIZEWE, - self.CURSOR_TEXT: IDC_IBEAM, - self.CURSOR_WAIT: IDC_WAIT, - self.CURSOR_WAIT_ARROW: IDC_APPSTARTING, - } - if name not in names: - raise RuntimeError('Unknown cursor name "%s"' % name) - cursor = _user32.LoadCursorW(None, MAKEINTRESOURCE(names[name])) - return Win32MouseCursor(cursor) - - def set_icon(self, *images): - # XXX Undocumented AFAICT, but XP seems happy to resize an image - # of any size, so no scaling necessary. 
- - def best_image(width, height): - # A heuristic for finding closest sized image to required size. - image = images[0] - for img in images: - if img.width == width and img.height == height: - # Exact match always used - return img - elif img.width >= width and \ - img.width * img.height > image.width * image.height: - # At least wide enough, and largest area - image = img - return image - - def get_icon(image): - # Alpha-blended icon: see http://support.microsoft.com/kb/318876 - format = 'BGRA' - pitch = len(format) * image.width - - header = BITMAPV5HEADER() - header.bV5Size = sizeof(header) - header.bV5Width = image.width - header.bV5Height = image.height - header.bV5Planes = 1 - header.bV5BitCount = 32 - header.bV5Compression = BI_BITFIELDS - header.bV5RedMask = 0x00ff0000 - header.bV5GreenMask = 0x0000ff00 - header.bV5BlueMask = 0x000000ff - header.bV5AlphaMask = 0xff000000 - - hdc = _user32.GetDC(None) - dataptr = c_void_p() - bitmap = _gdi32.CreateDIBSection(hdc, byref(header), DIB_RGB_COLORS, - byref(dataptr), None, 0) - _user32.ReleaseDC(None, hdc) - - image = image.get_image_data() - data = image.get_data(format, pitch) - memmove(dataptr, data, len(data)) - - mask = _gdi32.CreateBitmap(image.width, image.height, 1, 1, None) - - iconinfo = ICONINFO() - iconinfo.fIcon = True - iconinfo.hbmMask = mask - iconinfo.hbmColor = bitmap - icon = _user32.CreateIconIndirect(byref(iconinfo)) - - _gdi32.DeleteObject(mask) - _gdi32.DeleteObject(bitmap) - - return icon - - # Set large icon - image = best_image(_user32.GetSystemMetrics(SM_CXICON), - _user32.GetSystemMetrics(SM_CYICON)) - icon = get_icon(image) - _user32.SetClassLongPtrW(self._hwnd, GCL_HICON, icon) - - # Set small icon - image = best_image(_user32.GetSystemMetrics(SM_CXSMICON), - _user32.GetSystemMetrics(SM_CYSMICON)) - icon = get_icon(image) - _user32.SetClassLongPtrW(self._hwnd, GCL_HICONSM, icon) - - @lru_cache() - def _create_cursor_from_image(self, cursor): - """Creates platform cursor from an ImageCursor instance.""" - fmt = 'BGRA' - image = cursor.texture - pitch = len(fmt) * image.width - - header = BITMAPINFOHEADER() - header.biSize = sizeof(header) - header.biWidth = image.width - header.biHeight = image.height - header.biPlanes = 1 - header.biBitCount = 32 - - hdc = _user32.GetDC(None) - dataptr = c_void_p() - bitmap = _gdi32.CreateDIBSection(hdc, byref(header), DIB_RGB_COLORS, - byref(dataptr), None, 0) - _user32.ReleaseDC(None, hdc) - - image = image.get_image_data() - data = image.get_data(fmt, pitch) - memmove(dataptr, data, len(data)) - - mask = _gdi32.CreateBitmap(image.width, image.height, 1, 1, None) - - iconinfo = ICONINFO() - iconinfo.fIcon = False - iconinfo.hbmMask = mask - iconinfo.hbmColor = bitmap - iconinfo.xHotspot = int(cursor.hot_x) - iconinfo.yHotspot = int(image.height - cursor.hot_y) - icon = _user32.CreateIconIndirect(byref(iconinfo)) - - _gdi32.DeleteObject(mask) - _gdi32.DeleteObject(bitmap) - - return icon - - # Private util - - def _client_to_window_size(self, width, height): - rect = RECT() - rect.left = 0 - rect.top = 0 - rect.right = width - rect.bottom = height - _user32.AdjustWindowRectEx(byref(rect), - self._ws_style, False, self._ex_ws_style) - return rect.right - rect.left, rect.bottom - rect.top - - def _client_to_window_pos(self, x, y): - rect = RECT() - rect.left = x - rect.top = y - _user32.AdjustWindowRectEx(byref(rect), - self._ws_style, False, self._ex_ws_style) - return rect.left, rect.top - - # Event dispatching - - def dispatch_events(self): - """Legacy or manual 
dispatch.""" - from pyglet import app - app.platform_event_loop.start() - self._allow_dispatch_event = True - self.dispatch_pending_events() - - msg = MSG() - while _user32.PeekMessageW(byref(msg), 0, 0, 0, PM_REMOVE): - _user32.TranslateMessage(byref(msg)) - _user32.DispatchMessageW(byref(msg)) - self._allow_dispatch_event = False - - def dispatch_pending_events(self): - """Legacy or manual dispatch.""" - while self._event_queue: - event = self._event_queue.pop(0) - if type(event[0]) is str: - # pyglet event - EventDispatcher.dispatch_event(self, *event) - else: - # win32 event - event[0](*event[1:]) - - def _get_window_proc(self, event_handlers): - def f(hwnd, msg, wParam, lParam): - event_handler = event_handlers.get(msg, None) - result = None - if event_handler: - if self._allow_dispatch_event or not self._enable_event_queue: - result = event_handler(msg, wParam, lParam) - else: - result = 0 - self._event_queue.append((event_handler, msg, - wParam, lParam)) - if result is None: - result = _user32.DefWindowProcW(hwnd, msg, wParam, lParam) - return result - - return f - - # Event handlers - - def _get_modifiers(self, key_lParam=0): - modifiers = 0 - if self._keyboard_state[0x036] or self._keyboard_state[0x02A]: - modifiers |= key.MOD_SHIFT - if _user32.GetKeyState(VK_CONTROL) & 0xff00: - modifiers |= key.MOD_CTRL - if _user32.GetKeyState(VK_LWIN) & 0xff00: - modifiers |= key.MOD_WINDOWS - if _user32.GetKeyState(VK_CAPITAL) & 0x00ff: # toggle - modifiers |= key.MOD_CAPSLOCK - if _user32.GetKeyState(VK_NUMLOCK) & 0x00ff: # toggle - modifiers |= key.MOD_NUMLOCK - if _user32.GetKeyState(VK_SCROLL) & 0x00ff: # toggle - modifiers |= key.MOD_SCROLLLOCK - - if key_lParam: - if key_lParam & (1 << 29): - modifiers |= key.MOD_ALT - elif _user32.GetKeyState(VK_MENU) < 0: - modifiers |= key.MOD_ALT - return modifiers - - @staticmethod - def _get_location(lParam): - x = c_int16(lParam & 0xffff).value - y = c_int16(lParam >> 16).value - return x, y - - @Win32EventHandler(WM_KEYDOWN) - @Win32EventHandler(WM_KEYUP) - @Win32EventHandler(WM_SYSKEYDOWN) - @Win32EventHandler(WM_SYSKEYUP) - def _event_key(self, msg, wParam, lParam): - repeat = False - if lParam & (1 << 30): - if msg not in (WM_KEYUP, WM_SYSKEYUP): - repeat = True - ev = 'on_key_release' - else: - ev = 'on_key_press' - - symbol = keymap.get(wParam, None) - if symbol is None: - ch = _user32.MapVirtualKeyW(wParam, MAPVK_VK_TO_CHAR) - symbol = chmap.get(ch) - - if symbol is None: - symbol = key.user_key(wParam) - elif symbol == key.LCTRL and lParam & (1 << 24): - symbol = key.RCTRL - elif symbol == key.LALT and lParam & (1 << 24): - symbol = key.RALT - - if wParam == VK_SHIFT: - return # Let raw input handle this instead. - - modifiers = self._get_modifiers(lParam) - - if not repeat: - self.dispatch_event(ev, symbol, modifiers) - - ctrl = modifiers & key.MOD_CTRL != 0 - if (symbol, ctrl) in _motion_map and msg not in (WM_KEYUP, WM_SYSKEYUP): - motion = _motion_map[symbol, ctrl] - if modifiers & key.MOD_SHIFT: - self.dispatch_event('on_text_motion_select', motion) - else: - self.dispatch_event('on_text_motion', motion) - - # Send on to DefWindowProc if not exclusive. 
- if self._exclusive_keyboard: - return 0 - else: - return None - - @Win32EventHandler(WM_NCLBUTTONDOWN) - def _event_ncl_button_down(self, msg, wParam, lParam): - self._in_title_bar = True - - @Win32EventHandler(WM_CAPTURECHANGED) - def _event_capture_changed(self, msg, wParam, lParam): - self._in_title_bar = False - - if self._exclusive_mouse: - state = _user32.GetAsyncKeyState(VK_LBUTTON) - if not state & 0x8000: # released - if self._pending_click: - self._pending_click = False - - if self._has_focus or not self._hidden: - self._update_clipped_cursor() - - @Win32EventHandler(WM_CHAR) - def _event_char(self, msg, wParam, lParam): - text = chr(wParam) - if unicodedata.category(text) != 'Cc' or text == '\r': - self.dispatch_event('on_text', text) - return 0 - - @Win32EventHandler(WM_INPUT) - def _event_raw_input(self, msg, wParam, lParam): - hRawInput = cast(lParam, HRAWINPUT) - inp = RAWINPUT() - size = UINT(sizeof(inp)) - _user32.GetRawInputData(hRawInput, RID_INPUT, byref(inp), - byref(size), sizeof(RAWINPUTHEADER)) - - if inp.header.dwType == RIM_TYPEMOUSE: - if not self._exclusive_mouse: - return 0 - - rmouse = inp.data.mouse - - if rmouse.usFlags & 0x01 == MOUSE_MOVE_RELATIVE: - if rmouse.lLastX != 0 or rmouse.lLastY != 0: - # Motion event - # In relative motion, Y axis is positive for below. - # We invert it for Pyglet so positive is motion up. - if self._exclusive_mouse_buttons: - self.dispatch_event('on_mouse_drag', 0, 0, - rmouse.lLastX, -rmouse.lLastY, - self._exclusive_mouse_buttons, - self._get_modifiers()) - else: - self.dispatch_event('on_mouse_motion', 0, 0, - rmouse.lLastX, -rmouse.lLastY) - else: - if self._exclusive_mouse_lpos is None: - self._exclusive_mouse_lpos = rmouse.lLastX, rmouse.lLastY - last_x, last_y = self._exclusive_mouse_lpos - rel_x = rmouse.lLastX - last_x - rel_y = rmouse.lLastY - last_y - if rel_x != 0 or rel_y != 0.0: - # Motion event - if self._exclusive_mouse_buttons: - self.dispatch_event('on_mouse_drag', 0, 0, - rmouse.lLastX, -rmouse.lLastY, - self._exclusive_mouse_buttons, - self._get_modifiers()) - else: - self.dispatch_event('on_mouse_motion', 0, 0, - rel_x, rel_y) - self._exclusive_mouse_lpos = rmouse.lLastX, rmouse.lLastY - - elif inp.header.dwType == RIM_TYPEKEYBOARD: - if inp.data.keyboard.VKey == 255: - return 0 - - key_up = inp.data.keyboard.Flags & RI_KEY_BREAK - - if inp.data.keyboard.MakeCode == 0x02A: # LEFT_SHIFT - if not key_up and not self._keyboard_state[0x02A]: - self._keyboard_state[0x02A] = True - self.dispatch_event('on_key_press', key.LSHIFT, self._get_modifiers()) - - elif key_up and self._keyboard_state[0x02A]: - self._keyboard_state[0x02A] = False - self.dispatch_event('on_key_release', key.LSHIFT, self._get_modifiers()) - - elif inp.data.keyboard.MakeCode == 0x036: # RIGHT SHIFT - if not key_up and not self._keyboard_state[0x036]: - self._keyboard_state[0x036] = True - self.dispatch_event('on_key_press', key.RSHIFT, self._get_modifiers()) - - elif key_up and self._keyboard_state[0x036]: - self._keyboard_state[0x036] = False - self.dispatch_event('on_key_release', key.RSHIFT, self._get_modifiers()) - - return 0 - - @ViewEventHandler - @Win32EventHandler(WM_MOUSEMOVE) - def _event_mousemove(self, msg, wParam, lParam): - if self._exclusive_mouse and self._has_focus: - return 0 - - x, y = self._get_location(lParam) - y = self._height - y - - dx = x - self._mouse_x - dy = y - self._mouse_y - - if not self._tracking: - # There is no WM_MOUSEENTER message (!), so fake it from the - # first WM_MOUSEMOVE event after leaving. 
Use self._tracking - # to determine when to recreate the tracking structure after - # re-entering (to track the next WM_MOUSELEAVE). - self._mouse_in_window = True - self.set_mouse_platform_visible() - self.dispatch_event('on_mouse_enter', x, y) - self._tracking = True - track = TRACKMOUSEEVENT() - track.cbSize = sizeof(track) - track.dwFlags = TME_LEAVE - track.hwndTrack = self._view_hwnd - _user32.TrackMouseEvent(byref(track)) - - # Don't generate motion/drag events when mouse hasn't moved. (Issue - # 305) - if self._mouse_x == x and self._mouse_y == y: - return 0 - - self._mouse_x = x - self._mouse_y = y - - buttons = 0 - if wParam & MK_LBUTTON: - buttons |= mouse.LEFT - if wParam & MK_MBUTTON: - buttons |= mouse.MIDDLE - if wParam & MK_RBUTTON: - buttons |= mouse.RIGHT - if wParam & MK_XBUTTON1: - buttons |= mouse.MOUSE4 - if wParam & MK_XBUTTON2: - buttons |= mouse.MOUSE5 - - if buttons: - # Drag event - modifiers = self._get_modifiers() - self.dispatch_event('on_mouse_drag', - x, y, dx, dy, buttons, modifiers) - else: - # Motion event - self.dispatch_event('on_mouse_motion', x, y, dx, dy) - return 0 - - @ViewEventHandler - @Win32EventHandler(WM_MOUSELEAVE) - def _event_mouseleave(self, msg, wParam, lParam): - point = POINT() - _user32.GetCursorPos(byref(point)) - _user32.ScreenToClient(self._view_hwnd, byref(point)) - x = point.x - y = self._height - point.y - self._tracking = False - self._mouse_in_window = False - self.set_mouse_platform_visible() - self.dispatch_event('on_mouse_leave', x, y) - return 0 - - def _event_mousebutton(self, ev, button, lParam): - if ev == 'on_mouse_press': - _user32.SetCapture(self._view_hwnd) - else: - _user32.ReleaseCapture() - x, y = self._get_location(lParam) - y = self._height - y - self.dispatch_event(ev, x, y, button, self._get_modifiers()) - return 0 - - @ViewEventHandler - @Win32EventHandler(WM_LBUTTONDOWN) - def _event_lbuttondown(self, msg, wParam, lParam): - return self._event_mousebutton( - 'on_mouse_press', mouse.LEFT, lParam) - - @ViewEventHandler - @Win32EventHandler(WM_LBUTTONUP) - def _event_lbuttonup(self, msg, wParam, lParam): - return self._event_mousebutton( - 'on_mouse_release', mouse.LEFT, lParam) - - @ViewEventHandler - @Win32EventHandler(WM_MBUTTONDOWN) - def _event_mbuttondown(self, msg, wParam, lParam): - return self._event_mousebutton( - 'on_mouse_press', mouse.MIDDLE, lParam) - - @ViewEventHandler - @Win32EventHandler(WM_MBUTTONUP) - def _event_mbuttonup(self, msg, wParam, lParam): - return self._event_mousebutton( - 'on_mouse_release', mouse.MIDDLE, lParam) - - @ViewEventHandler - @Win32EventHandler(WM_RBUTTONDOWN) - def _event_rbuttondown(self, msg, wParam, lParam): - return self._event_mousebutton( - 'on_mouse_press', mouse.RIGHT, lParam) - - @ViewEventHandler - @Win32EventHandler(WM_RBUTTONUP) - def _event_rbuttonup(self, msg, wParam, lParam): - return self._event_mousebutton( - 'on_mouse_release', mouse.RIGHT, lParam) - - @ViewEventHandler - @Win32EventHandler(WM_XBUTTONDOWN) - def _event_xbuttondown(self, msg, wParam, lParam): - if c_short(wParam >> 16).value == 1: - button = mouse.MOUSE4 - if c_short(wParam >> 16).value == 2: - button = mouse.MOUSE5 - return self._event_mousebutton( - 'on_mouse_press', button, lParam) - - @ViewEventHandler - @Win32EventHandler(WM_XBUTTONUP) - def _event_xbuttonup(self, msg, wParam, lParam): - if c_short(wParam >> 16).value == 1: - button = mouse.MOUSE4 - if c_short(wParam >> 16).value == 2: - button = mouse.MOUSE5 - return self._event_mousebutton( - 'on_mouse_release', button, 
lParam) - - @Win32EventHandler(WM_MOUSEWHEEL) - def _event_mousewheel(self, msg, wParam, lParam): - delta = c_short(wParam >> 16).value - self.dispatch_event('on_mouse_scroll', - self._mouse_x, self._mouse_y, 0, delta / float(WHEEL_DELTA)) - return 0 - - @Win32EventHandler(WM_CLOSE) - def _event_close(self, msg, wParam, lParam): - self.dispatch_event('on_close') - return 0 - - @ViewEventHandler - @Win32EventHandler(WM_PAINT) - def _event_paint(self, msg, wParam, lParam): - self.dispatch_event('on_expose') - - # Validating the window using ValidateRect or ValidateRgn - # doesn't clear the paint message when more than one window - # is open [why?]; defer to DefWindowProc instead. - return None - - @Win32EventHandler(WM_SIZING) - def _event_sizing(self, msg, wParam, lParam): - # rect = cast(lParam, POINTER(RECT)).contents - # width, height = self.get_size() - - from pyglet import app - if app.event_loop is not None: - app.event_loop.enter_blocking() - return 1 - - @Win32EventHandler(WM_SIZE) - def _event_size(self, msg, wParam, lParam): - if not self._dc: - # Ignore window creation size event (appears for fullscreen - # only) -- we haven't got DC or HWND yet. - return None - - if wParam == SIZE_MINIMIZED: - # Minimized, not resized. - self._hidden = True - self.dispatch_event('on_hide') - return 0 - if self._hidden: - # Restored - self._hidden = False - self.dispatch_event('on_show') - w, h = self._get_location(lParam) - if not self._fullscreen: - self._width, self._height = w, h - self._update_view_location(self._width, self._height) - - if self._exclusive_mouse: - self._update_clipped_cursor() - - self.switch_to() - self.dispatch_event('on_resize', self._width, self._height) - return 0 - - @Win32EventHandler(WM_SYSCOMMAND) - def _event_syscommand(self, msg, wParam, lParam): - # check for ALT key to prevent app from hanging because there is - # no windows menu bar - if wParam == SC_KEYMENU and lParam & (1 >> 16) <= 0: - return 0 - - if wParam & 0xfff0 in (SC_MOVE, SC_SIZE): - # Should be in WM_ENTERSIZEMOVE, but we never get that message. 
- from pyglet import app - - if app.event_loop is not None: - app.event_loop.enter_blocking() - - @Win32EventHandler(WM_MOVE) - def _event_move(self, msg, wParam, lParam): - x, y = self._get_location(lParam) - self.dispatch_event('on_move', x, y) - return 0 - - @Win32EventHandler(WM_SETCURSOR) - def _event_setcursor(self, msg, wParam, lParam): - if self._exclusive_mouse and not self._mouse_platform_visible: - lo, hi = self._get_location(lParam) - if lo == HTCLIENT: # In frame - self._set_cursor_visibility(False) - return 1 - elif lo in (HTCAPTION, HTCLOSE, HTMAXBUTTON, HTMINBUTTON): # Allow in - self._set_cursor_visibility(True) - return 1 - - @Win32EventHandler(WM_ENTERSIZEMOVE) - def _event_entersizemove(self, msg, wParam, lParam): - self._moving = True - from pyglet import app - if app.event_loop is not None: - app.event_loop.exit_blocking() - - @Win32EventHandler(WM_EXITSIZEMOVE) - def _event_exitsizemove(self, msg, wParam, lParam): - self._moving = False - from pyglet import app - if app.event_loop is not None: - app.event_loop.exit_blocking() - - if self._exclusive_mouse: - self._update_clipped_cursor() - - @Win32EventHandler(WM_SETFOCUS) - def _event_setfocus(self, msg, wParam, lParam): - self.dispatch_event('on_activate') - self._has_focus = True - - if self._exclusive_mouse: - if _user32.GetAsyncKeyState(VK_LBUTTON): - self._pending_click = True - - self.set_exclusive_keyboard(self._exclusive_keyboard) - self.set_exclusive_mouse(self._exclusive_mouse) - - return 0 - - @Win32EventHandler(WM_KILLFOCUS) - def _event_killfocus(self, msg, wParam, lParam): - self.dispatch_event('on_deactivate') - self._has_focus = False - - exclusive_keyboard = self._exclusive_keyboard - exclusive_mouse = self._exclusive_mouse - # Disable both exclusive keyboard and mouse - self.set_exclusive_keyboard(False) - self.set_exclusive_mouse(False) - - # Reset shift state on Window focus loss. - for symbol in self._keyboard_state: - self._keyboard_state[symbol] = False - - # But save desired state and note that we lost focus - # This will allow to reset the correct mode once we regain focus - self._exclusive_keyboard = exclusive_keyboard - self._exclusive_keyboard_focus = False - self._exclusive_mouse = exclusive_mouse - self._exclusive_mouse_focus = False - return 0 - - @Win32EventHandler(WM_GETMINMAXINFO) - def _event_getminmaxinfo(self, msg, wParam, lParam): - info = MINMAXINFO.from_address(lParam) - - if self._minimum_size: - info.ptMinTrackSize.x, info.ptMinTrackSize.y = \ - self._client_to_window_size(*self._minimum_size) - if self._maximum_size: - info.ptMaxTrackSize.x, info.ptMaxTrackSize.y = \ - self._client_to_window_size(*self._maximum_size) - - return 0 - - @Win32EventHandler(WM_ERASEBKGND) - def _event_erasebkgnd(self, msg, wParam, lParam): - # Prevent flicker during resize; but erase bkgnd if we're fullscreen. - if self._fullscreen: - return 0 - else: - return 1 - - @ViewEventHandler - @Win32EventHandler(WM_ERASEBKGND) - def _event_erasebkgnd_view(self, msg, wParam, lParam): - # Prevent flicker during resize. - return 1 - - @Win32EventHandler(WM_DROPFILES) - def _event_drop_files(self, msg, wParam, lParam): - drop = wParam - - # Get the count so we can handle multiple files. - file_count = _shell32.DragQueryFileW(drop, 0xFFFFFFFF, None, 0) - - # Get where drop point was. - point = POINT() - _shell32.DragQueryPoint(drop, ctypes.byref(point)) - - paths = [] - for i in range(file_count): - length = _shell32.DragQueryFileW(drop, i, None, 0) # Length of string. 
- - buffer = create_unicode_buffer(length+1) - - _shell32.DragQueryFileW(drop, i, buffer, length + 1) - - paths.append(buffer.value) - - _shell32.DragFinish(drop) - - # Reverse Y and call event. - self.dispatch_event('on_file_drop', point.x, self._height - point.y, paths) - return 0 - - -__all__ = ["Win32EventHandler", "Win32Window"] diff --git a/spaces/ahsansbaig/instructor_dashboard/README.md b/spaces/ahsansbaig/instructor_dashboard/README.md deleted file mode 100644 index ea337d888d74a9b8cb1e177136c6b72f202c46e1..0000000000000000000000000000000000000000 --- a/spaces/ahsansbaig/instructor_dashboard/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Test -emoji: 💻 -colorFrom: yellow -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: other ---- \ No newline at end of file diff --git a/spaces/aiswaryamlds/YoutubeQA/README.md b/spaces/aiswaryamlds/YoutubeQA/README.md deleted file mode 100644 index 21e2f203892b11c7a28dad9379bd585d3774d263..0000000000000000000000000000000000000000 --- a/spaces/aiswaryamlds/YoutubeQA/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: YoutubeQA -emoji: 💻 -colorFrom: indigo -colorTo: gray -sdk: streamlit -sdk_version: 1.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akhaliq/BlendGAN/README.md b/spaces/akhaliq/BlendGAN/README.md deleted file mode 100644 index 06817089de07bb28d673ea38e5fcd34fbaf32a09..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/BlendGAN/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: BlendGAN -emoji: 🌍 -colorFrom: yellow -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/akhaliq/Mask2Former/mask2former_video/utils/__init__.py b/spaces/akhaliq/Mask2Former/mask2former_video/utils/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/mask2former_video/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/akhaliq/PaintTransformer/train/options/train_options.py b/spaces/akhaliq/PaintTransformer/train/options/train_options.py deleted file mode 100644 index 69e4a1ab5a2c3a1e2707a11d97fc8dbd7920b50a..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/PaintTransformer/train/options/train_options.py +++ /dev/null @@ -1,52 +0,0 @@ -from .base_options import BaseOptions - - -class TrainOptions(BaseOptions): - """This class includes training options. - - It also includes shared options defined in BaseOptions. 
- """ - - def initialize(self, parser): - parser = BaseOptions.initialize(self, parser) - # visdom and HTML visualization parameters - parser.add_argument('--display_freq', type=int, default=40, - help='frequency of showing training results on screen') - parser.add_argument('--display_ncols', type=int, default=4, - help='if positive, display all images in a single visdom web panel ' - 'with certain number of images per row.') - parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') - parser.add_argument('--display_server', type=str, default="http://localhost", - help='visdom server of the web display') - parser.add_argument('--display_env', type=str, default='main', - help='visdom display environment name (default is "main")') - parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') - parser.add_argument('--update_html_freq', type=int, default=1000, - help='frequency of saving training results to html') - parser.add_argument('--print_freq', type=int, default=10, - help='frequency of showing training results on console') - parser.add_argument('--no_html', action='store_true', - help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') - # network saving and loading parameters - parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') - parser.add_argument('--save_epoch_freq', type=int, default=5, - help='frequency of saving checkpoints at the end of epochs') - parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration') - parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') - parser.add_argument('--epoch_count', type=int, default=1, - help='the starting epoch count, we save the model ' - 'by , +, ...') - parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') - # training parameters - parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate') - parser.add_argument('--n_epochs_decay', type=int, default=100, - help='number of epochs to linearly decay learning rate to zero') - parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') - parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') - parser.add_argument('--lr_policy', type=str, default='linear', - help='learning rate policy. 
[linear | step | plateau | cosine]') - parser.add_argument('--lr_decay_iters', type=int, default=50, - help='multiply by a gamma every lr_decay_iters iterations') - - self.isTrain = True - return parser diff --git a/spaces/akhaliq/SOAT/app.py b/spaces/akhaliq/SOAT/app.py deleted file mode 100644 index 0baeb6c65802c523429f0fbc513668c534680807..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SOAT/app.py +++ /dev/null @@ -1,119 +0,0 @@ -import os -os.system("git clone https://github.com/mchong6/SOAT.git") -import sys -sys.path.append("SOAT") -import os -import torch -import torchvision -from torch import nn -import numpy as np -import torch.backends.cudnn as cudnn -cudnn.benchmark = True - -import math -import matplotlib.pyplot as plt -import torch.nn.functional as F -from model import * -from tqdm import tqdm as tqdm -import pickle -from copy import deepcopy -import warnings -warnings.filterwarnings("ignore", category=UserWarning) # get rid of interpolation warning -import kornia.filters as k -from torchvision.utils import save_image -from util import * -import scipy - -import gradio as gr - -import PIL - -from torchvision import transforms - -device = 'cpu' #@param ['cuda', 'cpu'] - -generator = Generator(256, 512, 8, channel_multiplier=2).eval().to(device) -truncation = 0.7 - -def display_image(image, size=None, mode='nearest', unnorm=False, title=''): - # image is [3,h,w] or [1,3,h,w] tensor [0,1] - if image.is_cuda: - image = image.cpu() - if size is not None and image.size(-1) != size: - image = F.interpolate(image, size=(size,size), mode=mode) - if image.dim() == 4: - image = image[0] - image = ((image.clamp(-1,1)+1)/2).permute(1, 2, 0).detach().numpy() - return image - - -#mean_latentland = load_model(generator, 'landscape.pt') -#mean_latentface = load_model(generator, 'face.pt') -#mean_latentchurch = load_model(generator, 'church.pt') - - -def inferece(num, seed): - mean_latent = load_model(generator, 'landscape.pt') - - num_im = int(num) - random_seed = int(seed) - - plt.rcParams['figure.dpi'] = 300 - - - # pad determines how much of an image is involve in the blending - pad = 512//4 - - all_im = [] - - random_state = np.random.RandomState(random_seed) - - # latent smoothing - with torch.no_grad(): - z = random_state.randn(num_im, 512).astype(np.float32) - z = scipy.ndimage.gaussian_filter(z, [.7, 0], mode='wrap') - z /= np.sqrt(np.mean(np.square(z))) - z = torch.from_numpy(z).to(device) - - source = generator.get_latent(z, truncation=truncation, mean_latent=mean_latent) - - # merge images 2 at a time - for i in range(num_im-1): - source1 = index_layers(source, i) - source2 = index_layers(source, i+1) - all_im.append(generator.merge_extension(source1, source2)) - - # display intermediate generations - # for i in all_im: - # display_image(i) - - - b,c,h,w = all_im[0].shape - panorama_im = torch.zeros(b,c,h,512+(num_im-2)*256) - - # We created a series of 2-blended images which we can overlay to form a large panorama - # add first image - coord = 256+pad - panorama_im[..., :coord] = all_im[0][..., :coord] - - for im in all_im[1:]: - panorama_im[..., coord:coord+512-2*pad] = im[..., pad:-pad] - coord += 512-2*pad - panorama_im[..., coord:] = all_im[-1][..., 512-pad:] - - img = display_image(panorama_im) - return img - -title = "SOAT" -description = "Gradio demo for SOAT Panorama Generaton for landscapes. Generate a panorama using a pretrained stylegan by stitching intermediate activations. To use it, simply add the number of images and random seed number . 
Read more at the links below." -article = "StyleGAN of All Trades: Image Manipulation with Only Pretrained StyleGAN | Github Repo
" - -gr.Interface( - inferece, - [gr.inputs.Number(default=5, label="Number of Images") -,gr.inputs.Number(default=90, label="Random Seed") -], - gr.outputs.Image(type="numpy", label="Output"), - title=title, - description=description, - article=article, theme="huggingface",enable_queue=True).launch(debug=True) \ No newline at end of file diff --git a/spaces/akhaliq/cool-japan-diffusion-2-1-0/app.py b/spaces/akhaliq/cool-japan-diffusion-2-1-0/app.py deleted file mode 100644 index 5219ac1b1d921e7104447adc38cbeda370fd3603..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/cool-japan-diffusion-2-1-0/app.py +++ /dev/null @@ -1,137 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = 'aipicasso/cool-japan-diffusion-2-1-0' -prefix = '' - -scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
- Cool Japan Diffusion 2 1 0
- Demo for Cool Japan Diffusion 2 1 0 Stable Diffusion model.
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""}
- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space
- Duplicate Space
- """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
- This space was created using SD Space Creator.
- """) - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/akhaliq/neural-waveshaping-synthesis/setup.py b/spaces/akhaliq/neural-waveshaping-synthesis/setup.py deleted file mode 100644 index 01de9b86fb6c7a8bc7f1a49a8997ccf01d028f14..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/neural-waveshaping-synthesis/setup.py +++ /dev/null @@ -1,3 +0,0 @@ -from setuptools import setup, find_packages - -setup(name="neural_waveshaping_synthesis", version="0.0.1", packages=find_packages()) diff --git a/spaces/alexray/btc_predictor/data_creation.py b/spaces/alexray/btc_predictor/data_creation.py deleted file mode 100644 index b37985d7c676867cf1c5d5adc8764a16ad3ce17b..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/data_creation.py +++ /dev/null @@ -1,50 +0,0 @@ -import yfinance as yf -import pandas as pd -import os - - -def create_data(): - - os.makedirs("data", exist_ok=True) - - btc_ticker = yf.Ticker("BTC-USD") - - df = btc_ticker.history(period="max") - - df.index = pd.to_datetime(df.index).date - df.index = pd.to_datetime(df.index) - - del df["Dividends"] - del df["Stock Splits"] - - df.columns = [c.lower() for c in df.columns] - - # Create target feature - df['Tomorrow'] = df['close'].shift(-1) - df['target'] = df['Tomorrow'].pct_change() * 100 - df = df.drop(['Tomorrow'], axis=1) - - assets = ( - "^GSPC ^DJI ^N225 ^N100 000001.SS " - "CL=F GC=F HG=F NVDA AAPL" - ) - - additional_data = yf.download(assets, start="2014-09-17") - - df_add = additional_data.Close - - df_add = df_add.fillna(method='ffill') - - df_ = df.merge(df_add, left_index=True, right_index=True, how='left') - - df_ = df_.fillna(method='ffill') - - df_.dropna(inplace=True) - - df_.to_csv("data/assets_data.csv") - - return df_ - - -if __name__ == '__main__': - create_data() diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/resolution/legacy/resolver.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/resolution/legacy/resolver.py deleted file mode 100644 index 8c149d437d749d1317dbf3d0f958d02cb8ccbee9..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/resolution/legacy/resolver.py +++ /dev/null @@ -1,467 +0,0 @@ -"""Dependency Resolution - -The dependency resolution in pip is performed as follows: - -for top-level requirements: - a. only one spec allowed per project, regardless of conflicts or not. - otherwise a "double requirement" exception is raised - b. they override sub-dependency requirements. -for sub-dependencies - a. "first found, wins" (where the order is breadth first) -""" - -# The following comment should be removed at some point in the future. 
-# mypy: strict-optional=False - -import logging -import sys -from collections import defaultdict -from itertools import chain -from typing import DefaultDict, Iterable, List, Optional, Set, Tuple - -from pip._vendor.packaging import specifiers -from pip._vendor.packaging.requirements import Requirement - -from pip._internal.cache import WheelCache -from pip._internal.exceptions import ( - BestVersionAlreadyInstalled, - DistributionNotFound, - HashError, - HashErrors, - NoneMetadataError, - UnsupportedPythonVersion, -) -from pip._internal.index.package_finder import PackageFinder -from pip._internal.metadata import BaseDistribution -from pip._internal.models.link import Link -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req.req_install import ( - InstallRequirement, - check_invalid_constraint_type, -) -from pip._internal.req.req_set import RequirementSet -from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider -from pip._internal.utils.compatibility_tags import get_supported -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import normalize_version_info -from pip._internal.utils.packaging import check_requires_python - -logger = logging.getLogger(__name__) - -DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]] - - -def _check_dist_requires_python( - dist: BaseDistribution, - version_info: Tuple[int, int, int], - ignore_requires_python: bool = False, -) -> None: - """ - Check whether the given Python version is compatible with a distribution's - "Requires-Python" value. - - :param version_info: A 3-tuple of ints representing the Python - major-minor-micro version to check. - :param ignore_requires_python: Whether to ignore the "Requires-Python" - value if the given Python version isn't compatible. - - :raises UnsupportedPythonVersion: When the given Python version isn't - compatible. - """ - # This idiosyncratically converts the SpecifierSet to str and let - # check_requires_python then parse it again into SpecifierSet. But this - # is the legacy resolver so I'm just not going to bother refactoring. - try: - requires_python = str(dist.requires_python) - except FileNotFoundError as e: - raise NoneMetadataError(dist, str(e)) - try: - is_compatible = check_requires_python( - requires_python, - version_info=version_info, - ) - except specifiers.InvalidSpecifier as exc: - logger.warning( - "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc - ) - return - - if is_compatible: - return - - version = ".".join(map(str, version_info)) - if ignore_requires_python: - logger.debug( - "Ignoring failed Requires-Python check for package %r: %s not in %r", - dist.raw_name, - version, - requires_python, - ) - return - - raise UnsupportedPythonVersion( - "Package {!r} requires a different Python: {} not in {!r}".format( - dist.raw_name, version, requires_python - ) - ) - - -class Resolver(BaseResolver): - """Resolves which packages need to be installed/uninstalled to perform \ - the requested operation without breaking the requirements of any package. 
- """ - - _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} - - def __init__( - self, - preparer: RequirementPreparer, - finder: PackageFinder, - wheel_cache: Optional[WheelCache], - make_install_req: InstallRequirementProvider, - use_user_site: bool, - ignore_dependencies: bool, - ignore_installed: bool, - ignore_requires_python: bool, - force_reinstall: bool, - upgrade_strategy: str, - py_version_info: Optional[Tuple[int, ...]] = None, - ) -> None: - super().__init__() - assert upgrade_strategy in self._allowed_strategies - - if py_version_info is None: - py_version_info = sys.version_info[:3] - else: - py_version_info = normalize_version_info(py_version_info) - - self._py_version_info = py_version_info - - self.preparer = preparer - self.finder = finder - self.wheel_cache = wheel_cache - - self.upgrade_strategy = upgrade_strategy - self.force_reinstall = force_reinstall - self.ignore_dependencies = ignore_dependencies - self.ignore_installed = ignore_installed - self.ignore_requires_python = ignore_requires_python - self.use_user_site = use_user_site - self._make_install_req = make_install_req - - self._discovered_dependencies: DiscoveredDependencies = defaultdict(list) - - def resolve( - self, root_reqs: List[InstallRequirement], check_supported_wheels: bool - ) -> RequirementSet: - """Resolve what operations need to be done - - As a side-effect of this method, the packages (and their dependencies) - are downloaded, unpacked and prepared for installation. This - preparation is done by ``pip.operations.prepare``. - - Once PyPI has static dependency metadata available, it would be - possible to move the preparation to become a step separated from - dependency resolution. - """ - requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels) - for req in root_reqs: - if req.constraint: - check_invalid_constraint_type(req) - requirement_set.add_requirement(req) - - # Actually prepare the files, and collect any exceptions. Most hash - # exceptions cannot be checked ahead of time, because - # _populate_link() needs to be called before we can make decisions - # based on link type. - discovered_reqs: List[InstallRequirement] = [] - hash_errors = HashErrors() - for req in chain(requirement_set.all_requirements, discovered_reqs): - try: - discovered_reqs.extend(self._resolve_one(requirement_set, req)) - except HashError as exc: - exc.req = req - hash_errors.append(exc) - - if hash_errors: - raise hash_errors - - return requirement_set - - def _is_upgrade_allowed(self, req: InstallRequirement) -> bool: - if self.upgrade_strategy == "to-satisfy-only": - return False - elif self.upgrade_strategy == "eager": - return True - else: - assert self.upgrade_strategy == "only-if-needed" - return req.user_supplied or req.constraint - - def _set_req_to_reinstall(self, req: InstallRequirement) -> None: - """ - Set a requirement to be installed. - """ - # Don't uninstall the conflict if doing a user install and the - # conflict is not a user install. - if not self.use_user_site or req.satisfied_by.in_usersite: - req.should_reinstall = True - req.satisfied_by = None - - def _check_skip_installed( - self, req_to_install: InstallRequirement - ) -> Optional[str]: - """Check if req_to_install should be skipped. - - This will check if the req is installed, and whether we should upgrade - or reinstall it, taking into account all the relevant user options. 
- - After calling this req_to_install will only have satisfied_by set to - None if the req_to_install is to be upgraded/reinstalled etc. Any - other value will be a dist recording the current thing installed that - satisfies the requirement. - - Note that for vcs urls and the like we can't assess skipping in this - routine - we simply identify that we need to pull the thing down, - then later on it is pulled down and introspected to assess upgrade/ - reinstalls etc. - - :return: A text reason for why it was skipped, or None. - """ - if self.ignore_installed: - return None - - req_to_install.check_if_exists(self.use_user_site) - if not req_to_install.satisfied_by: - return None - - if self.force_reinstall: - self._set_req_to_reinstall(req_to_install) - return None - - if not self._is_upgrade_allowed(req_to_install): - if self.upgrade_strategy == "only-if-needed": - return "already satisfied, skipping upgrade" - return "already satisfied" - - # Check for the possibility of an upgrade. For link-based - # requirements we have to pull the tree down and inspect to assess - # the version #, so it's handled way down. - if not req_to_install.link: - try: - self.finder.find_requirement(req_to_install, upgrade=True) - except BestVersionAlreadyInstalled: - # Then the best version is installed. - return "already up-to-date" - except DistributionNotFound: - # No distribution found, so we squash the error. It will - # be raised later when we re-try later to do the install. - # Why don't we just raise here? - pass - - self._set_req_to_reinstall(req_to_install) - return None - - def _find_requirement_link(self, req: InstallRequirement) -> Optional[Link]: - upgrade = self._is_upgrade_allowed(req) - best_candidate = self.finder.find_requirement(req, upgrade) - if not best_candidate: - return None - - # Log a warning per PEP 592 if necessary before returning. - link = best_candidate.link - if link.is_yanked: - reason = link.yanked_reason or "" - msg = ( - # Mark this as a unicode string to prevent - # "UnicodeEncodeError: 'ascii' codec can't encode character" - # in Python 2 when the reason contains non-ascii characters. - "The candidate selected for download or install is a " - "yanked version: {candidate}\n" - "Reason for being yanked: {reason}" - ).format(candidate=best_candidate, reason=reason) - logger.warning(msg) - - return link - - def _populate_link(self, req: InstallRequirement) -> None: - """Ensure that if a link can be found for this, that it is found. - - Note that req.link may still be None - if the requirement is already - installed and not needed to be upgraded based on the return value of - _is_upgrade_allowed(). - - If preparer.require_hashes is True, don't use the wheel cache, because - cached wheels, always built locally, have different hashes than the - files downloaded from the index server and thus throw false hash - mismatches. Furthermore, cached wheels at present have undeterministic - contents due to file modification times. 
- """ - if req.link is None: - req.link = self._find_requirement_link(req) - - if self.wheel_cache is None or self.preparer.require_hashes: - return - cache_entry = self.wheel_cache.get_cache_entry( - link=req.link, - package_name=req.name, - supported_tags=get_supported(), - ) - if cache_entry is not None: - logger.debug("Using cached wheel link: %s", cache_entry.link) - if req.link is req.original_link and cache_entry.persistent: - req.original_link_is_in_wheel_cache = True - req.link = cache_entry.link - - def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution: - """Takes a InstallRequirement and returns a single AbstractDist \ - representing a prepared variant of the same. - """ - if req.editable: - return self.preparer.prepare_editable_requirement(req) - - # satisfied_by is only evaluated by calling _check_skip_installed, - # so it must be None here. - assert req.satisfied_by is None - skip_reason = self._check_skip_installed(req) - - if req.satisfied_by: - return self.preparer.prepare_installed_requirement(req, skip_reason) - - # We eagerly populate the link, since that's our "legacy" behavior. - self._populate_link(req) - dist = self.preparer.prepare_linked_requirement(req) - - # NOTE - # The following portion is for determining if a certain package is - # going to be re-installed/upgraded or not and reporting to the user. - # This should probably get cleaned up in a future refactor. - - # req.req is only avail after unpack for URL - # pkgs repeat check_if_exists to uninstall-on-upgrade - # (#14) - if not self.ignore_installed: - req.check_if_exists(self.use_user_site) - - if req.satisfied_by: - should_modify = ( - self.upgrade_strategy != "to-satisfy-only" - or self.force_reinstall - or self.ignore_installed - or req.link.scheme == "file" - ) - if should_modify: - self._set_req_to_reinstall(req) - else: - logger.info( - "Requirement already satisfied (use --upgrade to upgrade): %s", - req, - ) - return dist - - def _resolve_one( - self, - requirement_set: RequirementSet, - req_to_install: InstallRequirement, - ) -> List[InstallRequirement]: - """Prepare a single requirements file. - - :return: A list of additional InstallRequirements to also install. - """ - # Tell user what we are doing for this requirement: - # obtain (editable), skipping, processing (local url), collecting - # (remote url or package name) - if req_to_install.constraint or req_to_install.prepared: - return [] - - req_to_install.prepared = True - - # Parse and return dependencies - dist = self._get_dist_for(req_to_install) - # This will raise UnsupportedPythonVersion if the given Python - # version isn't compatible with the distribution's Requires-Python. - _check_dist_requires_python( - dist, - version_info=self._py_version_info, - ignore_requires_python=self.ignore_requires_python, - ) - - more_reqs: List[InstallRequirement] = [] - - def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None: - # This idiosyncratically converts the Requirement to str and let - # make_install_req then parse it again into Requirement. But this is - # the legacy resolver so I'm just not going to bother refactoring. 
- sub_install_req = self._make_install_req(str(subreq), req_to_install) - parent_req_name = req_to_install.name - to_scan_again, add_to_parent = requirement_set.add_requirement( - sub_install_req, - parent_req_name=parent_req_name, - extras_requested=extras_requested, - ) - if parent_req_name and add_to_parent: - self._discovered_dependencies[parent_req_name].append(add_to_parent) - more_reqs.extend(to_scan_again) - - with indent_log(): - # We add req_to_install before its dependencies, so that we - # can refer to it when adding dependencies. - if not requirement_set.has_requirement(req_to_install.name): - # 'unnamed' requirements will get added here - # 'unnamed' requirements can only come from being directly - # provided by the user. - assert req_to_install.user_supplied - requirement_set.add_requirement(req_to_install, parent_req_name=None) - - if not self.ignore_dependencies: - if req_to_install.extras: - logger.debug( - "Installing extra requirements: %r", - ",".join(req_to_install.extras), - ) - missing_requested = sorted( - set(req_to_install.extras) - set(dist.iter_provided_extras()) - ) - for missing in missing_requested: - logger.warning( - "%s %s does not provide the extra '%s'", - dist.raw_name, - dist.version, - missing, - ) - - available_requested = sorted( - set(dist.iter_provided_extras()) & set(req_to_install.extras) - ) - for subreq in dist.iter_dependencies(available_requested): - add_req(subreq, extras_requested=available_requested) - - return more_reqs - - def get_installation_order( - self, req_set: RequirementSet - ) -> List[InstallRequirement]: - """Create the installation order. - - The installation order is topological - requirements are installed - before the requiring thing. We break cycles at an arbitrary point, - and make no other guarantees. - """ - # The current implementation, which we may change at any point - # installs the user specified things in the order given, except when - # dependencies must come earlier to achieve topological order. 
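        # Illustrative walk-through (hypothetical requirements): if the user asked for
        # A and then B, and A depends on C, schedule(A) first recurses into C, so the
        # returned order is [C, A, B] -- each dependency is appended before the
        # requirement that needs it, while satisfied, already-scheduled, and
        # constraint-only requirements are skipped.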
- order = [] - ordered_reqs: Set[InstallRequirement] = set() - - def schedule(req: InstallRequirement) -> None: - if req.satisfied_by or req in ordered_reqs: - return - if req.constraint: - return - ordered_reqs.add(req) - for dep in self._discovered_dependencies[req.name]: - schedule(dep) - order.append(req) - - for install_req in req_set.requirements.values(): - schedule(install_req) - return order diff --git a/spaces/allknowingroger/Image-Models-Test100/app.py b/spaces/allknowingroger/Image-Models-Test100/app.py deleted file mode 100644 index f54c0f88467df698da8acf76ccf251ea9b74f073..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test100/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "digiplay/HIJKLMix_v2", - "digiplay/MGM", - "sweetpablo/diffuwusion", - "faizonly5953/freya", - "sunyijia97/lora-trained-xl-colab-doll", - "kangsul/sd-pokemon-model-lora-sdxl", - "Sans404/somethingidk", - "pavani8/my-pet-dog", - "goofyai/Leonardo_Ai_Style_Illustration", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, 
[start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test58/README.md b/spaces/allknowingroger/Image-Models-Test58/README.md deleted file mode 100644 index 56d7cc54e164515e01fd9d3a20a63de317f06ece..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test58/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Models -emoji: 👀 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test57 ---- - - \ No newline at end of file diff --git a/spaces/amankishore/sjc/sd1/ldm/models/diffusion/__init__.py b/spaces/amankishore/sjc/sd1/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/common/pa_ringbuffer.c b/spaces/amarchheda/ChordDuplicate/portaudio/src/common/pa_ringbuffer.c deleted file mode 100644 index b978d54f195c3a898b5fab79159072b28d6a1a1b..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/src/common/pa_ringbuffer.c +++ /dev/null @@ -1,237 +0,0 @@ -/* - * $Id$ - * Portable Audio I/O Library - * Ring Buffer utility. - * - * Author: Phil Burk, http://www.softsynth.com - * modified for SMP safety on Mac OS X by Bjorn Roche - * modified for SMP safety on Linux by Leland Lucius - * also, allowed for const where possible - * modified for multiple-byte-sized data elements by Sven Fischer - * - * Note that this is safe only for a single-thread reader and a - * single-thread writer. - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** - @file - @ingroup common_src -*/ - -#include -#include -#include -#include "pa_ringbuffer.h" -#include -#include "pa_memorybarrier.h" - -/*************************************************************************** - * Initialize FIFO. - * elementCount must be power of 2, returns -1 if not. - */ -ring_buffer_size_t PaUtil_InitializeRingBuffer( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementSizeBytes, ring_buffer_size_t elementCount, void *dataPtr ) -{ - if( ((elementCount-1) & elementCount) != 0) return -1; /* Not Power of two. */ - rbuf->bufferSize = elementCount; - rbuf->buffer = (char *)dataPtr; - PaUtil_FlushRingBuffer( rbuf ); - rbuf->bigMask = (elementCount*2)-1; - rbuf->smallMask = (elementCount)-1; - rbuf->elementSizeBytes = elementSizeBytes; - return 0; -} - -/*************************************************************************** -** Return number of elements available for reading. */ -ring_buffer_size_t PaUtil_GetRingBufferReadAvailable( const PaUtilRingBuffer *rbuf ) -{ - return ( (rbuf->writeIndex - rbuf->readIndex) & rbuf->bigMask ); -} -/*************************************************************************** -** Return number of elements available for writing. */ -ring_buffer_size_t PaUtil_GetRingBufferWriteAvailable( const PaUtilRingBuffer *rbuf ) -{ - return ( rbuf->bufferSize - PaUtil_GetRingBufferReadAvailable(rbuf)); -} - -/*************************************************************************** -** Clear buffer. Should only be called when buffer is NOT being read or written. */ -void PaUtil_FlushRingBuffer( PaUtilRingBuffer *rbuf ) -{ - rbuf->writeIndex = rbuf->readIndex = 0; -} - -/*************************************************************************** -** Get address of region(s) to which we can write data. -** If the region is contiguous, size2 will be zero. -** If non-contiguous, size2 will be the size of second region. -** Returns room available to be written or elementCount, whichever is smaller. -*/ -ring_buffer_size_t PaUtil_GetRingBufferWriteRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, - void **dataPtr1, ring_buffer_size_t *sizePtr1, - void **dataPtr2, ring_buffer_size_t *sizePtr2 ) -{ - ring_buffer_size_t index; - ring_buffer_size_t available = PaUtil_GetRingBufferWriteAvailable( rbuf ); - if( elementCount > available ) elementCount = available; - /* Check to see if write is not contiguous. */ - index = rbuf->writeIndex & rbuf->smallMask; - if( (index + elementCount) > rbuf->bufferSize ) - { - /* Write data in two blocks that wrap the buffer. 
*/ - ring_buffer_size_t firstHalf = rbuf->bufferSize - index; - *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; - *sizePtr1 = firstHalf; - *dataPtr2 = &rbuf->buffer[0]; - *sizePtr2 = elementCount - firstHalf; - } - else - { - *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; - *sizePtr1 = elementCount; - *dataPtr2 = NULL; - *sizePtr2 = 0; - } - - if( available ) - PaUtil_FullMemoryBarrier(); /* (write-after-read) => full barrier */ - - return elementCount; -} - - -/*************************************************************************** -*/ -ring_buffer_size_t PaUtil_AdvanceRingBufferWriteIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ) -{ - /* ensure that previous writes are seen before we update the write index - (write after write) - */ - PaUtil_WriteMemoryBarrier(); - return rbuf->writeIndex = (rbuf->writeIndex + elementCount) & rbuf->bigMask; -} - -/*************************************************************************** -** Get address of region(s) from which we can read data. -** If the region is contiguous, size2 will be zero. -** If non-contiguous, size2 will be the size of second region. -** Returns room available to be read or elementCount, whichever is smaller. -*/ -ring_buffer_size_t PaUtil_GetRingBufferReadRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, - void **dataPtr1, ring_buffer_size_t *sizePtr1, - void **dataPtr2, ring_buffer_size_t *sizePtr2 ) -{ - ring_buffer_size_t index; - ring_buffer_size_t available = PaUtil_GetRingBufferReadAvailable( rbuf ); /* doesn't use memory barrier */ - if( elementCount > available ) elementCount = available; - /* Check to see if read is not contiguous. */ - index = rbuf->readIndex & rbuf->smallMask; - if( (index + elementCount) > rbuf->bufferSize ) - { - /* Write data in two blocks that wrap the buffer. */ - ring_buffer_size_t firstHalf = rbuf->bufferSize - index; - *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; - *sizePtr1 = firstHalf; - *dataPtr2 = &rbuf->buffer[0]; - *sizePtr2 = elementCount - firstHalf; - } - else - { - *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; - *sizePtr1 = elementCount; - *dataPtr2 = NULL; - *sizePtr2 = 0; - } - - if( available ) - PaUtil_ReadMemoryBarrier(); /* (read-after-read) => read barrier */ - - return elementCount; -} -/*************************************************************************** -*/ -ring_buffer_size_t PaUtil_AdvanceRingBufferReadIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ) -{ - /* ensure that previous reads (copies out of the ring buffer) are always completed before updating (writing) the read index. - (write-after-read) => full barrier - */ - PaUtil_FullMemoryBarrier(); - return rbuf->readIndex = (rbuf->readIndex + elementCount) & rbuf->bigMask; -} - -/*************************************************************************** -** Return elements written. 
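** Typical usage (illustrative, with hypothetical variable names): a single
** producer thread calls PaUtil_WriteRingBuffer( &rb, samples, n ) while a single
** consumer thread calls PaUtil_ReadRingBuffer( &rb, out, n ); both return the
** number of elements actually transferred, which may be less than requested
** when the FIFO is almost full or almost empty.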
*/ -ring_buffer_size_t PaUtil_WriteRingBuffer( PaUtilRingBuffer *rbuf, const void *data, ring_buffer_size_t elementCount ) -{ - ring_buffer_size_t size1, size2, numWritten; - void *data1, *data2; - numWritten = PaUtil_GetRingBufferWriteRegions( rbuf, elementCount, &data1, &size1, &data2, &size2 ); - if( size2 > 0 ) - { - - memcpy( data1, data, size1*rbuf->elementSizeBytes ); - data = ((char *)data) + size1*rbuf->elementSizeBytes; - memcpy( data2, data, size2*rbuf->elementSizeBytes ); - } - else - { - memcpy( data1, data, size1*rbuf->elementSizeBytes ); - } - PaUtil_AdvanceRingBufferWriteIndex( rbuf, numWritten ); - return numWritten; -} - -/*************************************************************************** -** Return elements read. */ -ring_buffer_size_t PaUtil_ReadRingBuffer( PaUtilRingBuffer *rbuf, void *data, ring_buffer_size_t elementCount ) -{ - ring_buffer_size_t size1, size2, numRead; - void *data1, *data2; - numRead = PaUtil_GetRingBufferReadRegions( rbuf, elementCount, &data1, &size1, &data2, &size2 ); - if( size2 > 0 ) - { - memcpy( data, data1, size1*rbuf->elementSizeBytes ); - data = ((char *)data) + size1*rbuf->elementSizeBytes; - memcpy( data, data2, size2*rbuf->elementSizeBytes ); - } - else - { - memcpy( data, data1, size1*rbuf->elementSizeBytes ); - } - PaUtil_AdvanceRingBufferReadIndex( rbuf, numRead ); - return numRead; -} diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/hostapi/asio/pa_asio.cpp b/spaces/amarchheda/ChordDuplicate/portaudio/src/hostapi/asio/pa_asio.cpp deleted file mode 100644 index bc5f0e415098142a6c98b155ac5a696880544132..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/src/hostapi/asio/pa_asio.cpp +++ /dev/null @@ -1,4250 +0,0 @@ -/* - * $Id$ - * Portable Audio I/O Library for ASIO Drivers - * - * Author: Stephane Letz - * Based on the Open Source API proposed by Ross Bencina - * Copyright (c) 2000-2002 Stephane Letz, Phil Burk, Ross Bencina - * Blocking i/o implementation by Sven Fischer, Institute of Hearing - * Technology and Audiology (www.hoertechnik-audiologie.de) - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/* Modification History - - 08-03-01 First version : Stephane Letz - 08-06-01 Tweaks for PC, use C++, buffer allocation, Float32 to Int32 conversion : Phil Burk - 08-20-01 More conversion, PA_StreamTime, Pa_GetHostError : Stephane Letz - 08-21-01 PaUInt8 bug correction, implementation of ASIOSTFloat32LSB and ASIOSTFloat32MSB native formats : Stephane Letz - 08-24-01 MAX_INT32_FP hack, another Uint8 fix : Stephane and Phil - 08-27-01 Implementation of hostBufferSize < userBufferSize case, better management of the output buffer when - the stream is stopped : Stephane Letz - 08-28-01 Check the stream pointer for null in bufferSwitchTimeInfo, correct bug in bufferSwitchTimeInfo when - the stream is stopped : Stephane Letz - 10-12-01 Correct the PaHost_CalcNumHostBuffers function: computes FramesPerHostBuffer to be the lowest that - respect requested FramesPerUserBuffer and userBuffersPerHostBuffer : Stephane Letz - 10-26-01 Management of hostBufferSize and userBufferSize of any size : Stephane Letz - 10-27-01 Improve calculus of hostBufferSize to be multiple or divisor of userBufferSize if possible : Stephane and Phil - 10-29-01 Change MAX_INT32_FP to (2147483520.0f) to prevent roundup to 0x80000000 : Phil Burk - 10-31-01 Clear the output buffer and user buffers in PaHost_StartOutput, correct bug in GetFirstMultiple : Stephane Letz - 11-06-01 Rename functions : Stephane Letz - 11-08-01 New Pa_ASIO_Adaptor_Init function to init Callback adpatation variables, cleanup of Pa_ASIO_Callback_Input: Stephane Letz - 11-29-01 Break apart device loading to debug random failure in Pa_ASIO_QueryDeviceInfo ; Phil Burk - 01-03-02 Deallocate all resources in PaHost_Term for cases where Pa_CloseStream is not called properly : Stephane Letz - 02-01-02 Cleanup, test of multiple-stream opening : Stephane Letz - 19-02-02 New Pa_ASIO_loadDriver that calls CoInitialize on each thread on Windows : Stephane Letz - 09-04-02 Correct error code management in PaHost_Term, removes various compiler warning : Stephane Letz - 12-04-02 Add Mac includes for and : Phil Burk - 13-04-02 Removes another compiler warning : Stephane Letz - 30-04-02 Pa_ASIO_QueryDeviceInfo bug correction, memory allocation checking, better error handling : D Viens, P Burk, S Letz - 12-06-02 Rehashed into new multi-api infrastructure, added support for all ASIO sample formats : Ross Bencina - 18-06-02 Added pa_asio.h, PaAsio_GetAvailableLatencyValues() : Ross B. - 21-06-02 Added SelectHostBufferSize() which selects host buffer size based on user latency parameters : Ross Bencina - ** NOTE maintenance history is now stored in CVS ** -*/ - -/** @file - @ingroup hostapi_src - - Note that specific support for paInputUnderflow, paOutputOverflow and - paNeverDropInput is not necessary or possible with this driver due to the - synchronous full duplex double-buffered architecture of ASIO. 
-*/ - - -#include -#include -#include -//#include -#include - -#include -#include - -#include "portaudio.h" -#include "pa_asio.h" -#include "pa_util.h" -#include "pa_allocation.h" -#include "pa_hostapi.h" -#include "pa_stream.h" -#include "pa_cpuload.h" -#include "pa_process.h" -#include "pa_debugprint.h" -#include "pa_ringbuffer.h" - -#include "pa_win_coinitialize.h" - -/* This version of pa_asio.cpp is currently only targeted at Win32, - It would require a few tweaks to work with pre-OS X Macintosh. - To make configuration easier, we define WIN32 here to make sure - that the ASIO SDK knows this is Win32. -*/ -#ifndef WIN32 -#define WIN32 -#endif - -#include "asiosys.h" -#include "asio.h" -#include "asiodrivers.h" -#include "iasiothiscallresolver.h" - -/* -#if MAC -#include -#include -#include -#else -*/ -/* -#include -#include -#include -*/ -/* -#endif -*/ - - -/* winmm.lib is needed for timeGetTime() (this is in winmm.a if you're using gcc) */ -#if (defined(WIN32) && (defined(_MSC_VER) && (_MSC_VER >= 1200))) /* MSC version 6 and above */ -#pragma comment(lib, "winmm.lib") -#endif - - -/* external reference to ASIO SDK's asioDrivers. - - This is a bit messy because we want to explicitly manage - allocation/deallocation of this structure, but some layers of the SDK - which we currently use (eg the implementation in asio.cpp) still - use this global version. - - For now we keep it in sync with our local instance in the host - API representation structure, but later we should be able to remove - all dependence on it. -*/ -extern AsioDrivers* asioDrivers; - - -/* We are trying to be compatible with CARBON but this has not been thoroughly tested. */ -/* not tested at all since new V19 code was introduced. */ -#define CARBON_COMPATIBLE (0) - - -/* prototypes for functions declared in this file */ - -extern "C" PaError PaAsio_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex ); -static void Terminate( struct PaUtilHostApiRepresentation *hostApi ); -static PaError OpenStream( struct PaUtilHostApiRepresentation *hostApi, - PaStream** s, - const PaStreamParameters *inputParameters, - const PaStreamParameters *outputParameters, - double sampleRate, - unsigned long framesPerBuffer, - PaStreamFlags streamFlags, - PaStreamCallback *streamCallback, - void *userData ); -static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi, - const PaStreamParameters *inputParameters, - const PaStreamParameters *outputParameters, - double sampleRate ); -static PaError CloseStream( PaStream* stream ); -static PaError StartStream( PaStream *stream ); -static PaError StopStream( PaStream *stream ); -static PaError AbortStream( PaStream *stream ); -static PaError IsStreamStopped( PaStream *s ); -static PaError IsStreamActive( PaStream *stream ); -static PaTime GetStreamTime( PaStream *stream ); -static double GetStreamCpuLoad( PaStream* stream ); -static PaError ReadStream( PaStream* stream, void *buffer, unsigned long frames ); -static PaError WriteStream( PaStream* stream, const void *buffer, unsigned long frames ); -static signed long GetStreamReadAvailable( PaStream* stream ); -static signed long GetStreamWriteAvailable( PaStream* stream ); - -/* Blocking i/o callback function. 
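   (Descriptive note, inferred from the blocking ReadStream/WriteStream prototypes
   and the pa_ringbuffer.h include above: this callback is assumed to be installed
   when a stream is opened without a user callback, shuttling samples between the
   ASIO host buffers and internal ring buffers so that the blocking read/write
   calls can wait until data or space is available.)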
*/ -static int BlockingIoPaCallback(const void *inputBuffer , - void *outputBuffer , - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo *timeInfo , - PaStreamCallbackFlags statusFlags , - void *userData ); - -/* our ASIO callback functions */ - -static void bufferSwitch(long index, ASIOBool processNow); -static ASIOTime *bufferSwitchTimeInfo(ASIOTime *timeInfo, long index, ASIOBool processNow); -static void sampleRateChanged(ASIOSampleRate sRate); -static long asioMessages(long selector, long value, void* message, double* opt); - -static ASIOCallbacks asioCallbacks_ = - { bufferSwitch, sampleRateChanged, asioMessages, bufferSwitchTimeInfo }; - - -#define PA_ASIO_SET_LAST_HOST_ERROR( errorCode, errorText ) \ - PaUtil_SetLastHostErrorInfo( paASIO, errorCode, errorText ) - - -static void PaAsio_SetLastSystemError( DWORD errorCode ) -{ - LPVOID lpMsgBuf; - FormatMessage( - FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, - NULL, - errorCode, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - (LPTSTR) &lpMsgBuf, - 0, - NULL - ); - PaUtil_SetLastHostErrorInfo( paASIO, errorCode, (const char*)lpMsgBuf ); - LocalFree( lpMsgBuf ); -} - -#define PA_ASIO_SET_LAST_SYSTEM_ERROR( errorCode ) \ - PaAsio_SetLastSystemError( errorCode ) - - -static const char* PaAsio_GetAsioErrorText( ASIOError asioError ) -{ - const char *result; - - switch( asioError ){ - case ASE_OK: - case ASE_SUCCESS: result = "Success"; break; - case ASE_NotPresent: result = "Hardware input or output is not present or available"; break; - case ASE_HWMalfunction: result = "Hardware is malfunctioning"; break; - case ASE_InvalidParameter: result = "Input parameter invalid"; break; - case ASE_InvalidMode: result = "Hardware is in a bad mode or used in a bad mode"; break; - case ASE_SPNotAdvancing: result = "Hardware is not running when sample position is inquired"; break; - case ASE_NoClock: result = "Sample clock or rate cannot be determined or is not present"; break; - case ASE_NoMemory: result = "Not enough memory for completing the request"; break; - default: result = "Unknown ASIO error"; break; - } - - return result; -} - - -#define PA_ASIO_SET_LAST_ASIO_ERROR( asioError ) \ - PaUtil_SetLastHostErrorInfo( paASIO, asioError, PaAsio_GetAsioErrorText( asioError ) ) - - - - -// Atomic increment and decrement operations -#if MAC - /* need to be implemented on Mac */ - inline long PaAsio_AtomicIncrement(volatile long* v) {return ++(*const_cast(v));} - inline long PaAsio_AtomicDecrement(volatile long* v) {return --(*const_cast(v));} -#elif WINDOWS - inline long PaAsio_AtomicIncrement(volatile long* v) {return InterlockedIncrement(const_cast(v));} - inline long PaAsio_AtomicDecrement(volatile long* v) {return InterlockedDecrement(const_cast(v));} -#endif - - - -typedef struct PaAsioDriverInfo -{ - ASIODriverInfo asioDriverInfo; - long inputChannelCount, outputChannelCount; - long bufferMinSize, bufferMaxSize, bufferPreferredSize, bufferGranularity; - bool postOutput; -} -PaAsioDriverInfo; - - -/* PaAsioHostApiRepresentation - host api datastructure specific to this implementation */ - -typedef struct -{ - PaUtilHostApiRepresentation inheritedHostApiRep; - PaUtilStreamInterface callbackStreamInterface; - PaUtilStreamInterface blockingStreamInterface; - - PaUtilAllocationGroup *allocations; - - PaWinUtilComInitializationResult comInitializationResult; - - AsioDrivers *asioDrivers; - void *systemSpecific; - - /* the ASIO C API only allows one ASIO driver to be open at a time, - so we keep track of whether we have the 
driver open here, and - use this information to return errors from OpenStream if the - driver is already open. - - openAsioDeviceIndex will be PaNoDevice if there is no device open - and a valid pa_asio (not global) device index otherwise. - - openAsioDriverInfo is populated with the driver info for the - currently open device (if any) - */ - PaDeviceIndex openAsioDeviceIndex; - PaAsioDriverInfo openAsioDriverInfo; -} -PaAsioHostApiRepresentation; - - -/* - Retrieve driver names from ASIO, returned in a char** - allocated in . -*/ -static char **GetAsioDriverNames( PaAsioHostApiRepresentation *asioHostApi, PaUtilAllocationGroup *group, long driverCount ) -{ - char **result = 0; - int i; - - result =(char**)PaUtil_GroupAllocateMemory( - group, sizeof(char*) * driverCount ); - if( !result ) - goto error; - - result[0] = (char*)PaUtil_GroupAllocateMemory( - group, 32 * driverCount ); - if( !result[0] ) - goto error; - - for( i=0; iasioDrivers->getDriverNames( result, driverCount ); - -error: - return result; -} - - -static PaSampleFormat AsioSampleTypeToPaNativeSampleFormat(ASIOSampleType type) -{ - switch (type) { - case ASIOSTInt16MSB: - case ASIOSTInt16LSB: - return paInt16; - - case ASIOSTFloat32MSB: - case ASIOSTFloat32LSB: - case ASIOSTFloat64MSB: - case ASIOSTFloat64LSB: - return paFloat32; - - case ASIOSTInt32MSB: - case ASIOSTInt32LSB: - case ASIOSTInt32MSB16: - case ASIOSTInt32LSB16: - case ASIOSTInt32MSB18: - case ASIOSTInt32MSB20: - case ASIOSTInt32MSB24: - case ASIOSTInt32LSB18: - case ASIOSTInt32LSB20: - case ASIOSTInt32LSB24: - return paInt32; - - case ASIOSTInt24MSB: - case ASIOSTInt24LSB: - return paInt24; - - default: - return paCustomFormat; - } -} - -void AsioSampleTypeLOG(ASIOSampleType type) -{ - switch (type) { - case ASIOSTInt16MSB: PA_DEBUG(("ASIOSTInt16MSB\n")); break; - case ASIOSTInt16LSB: PA_DEBUG(("ASIOSTInt16LSB\n")); break; - case ASIOSTFloat32MSB:PA_DEBUG(("ASIOSTFloat32MSB\n"));break; - case ASIOSTFloat32LSB:PA_DEBUG(("ASIOSTFloat32LSB\n"));break; - case ASIOSTFloat64MSB:PA_DEBUG(("ASIOSTFloat64MSB\n"));break; - case ASIOSTFloat64LSB:PA_DEBUG(("ASIOSTFloat64LSB\n"));break; - case ASIOSTInt32MSB: PA_DEBUG(("ASIOSTInt32MSB\n")); break; - case ASIOSTInt32LSB: PA_DEBUG(("ASIOSTInt32LSB\n")); break; - case ASIOSTInt32MSB16:PA_DEBUG(("ASIOSTInt32MSB16\n"));break; - case ASIOSTInt32LSB16:PA_DEBUG(("ASIOSTInt32LSB16\n"));break; - case ASIOSTInt32MSB18:PA_DEBUG(("ASIOSTInt32MSB18\n"));break; - case ASIOSTInt32MSB20:PA_DEBUG(("ASIOSTInt32MSB20\n"));break; - case ASIOSTInt32MSB24:PA_DEBUG(("ASIOSTInt32MSB24\n"));break; - case ASIOSTInt32LSB18:PA_DEBUG(("ASIOSTInt32LSB18\n"));break; - case ASIOSTInt32LSB20:PA_DEBUG(("ASIOSTInt32LSB20\n"));break; - case ASIOSTInt32LSB24:PA_DEBUG(("ASIOSTInt32LSB24\n"));break; - case ASIOSTInt24MSB: PA_DEBUG(("ASIOSTInt24MSB\n")); break; - case ASIOSTInt24LSB: PA_DEBUG(("ASIOSTInt24LSB\n")); break; - default: PA_DEBUG(("Custom Format%d\n",type));break; - - } -} - -static int BytesPerAsioSample( ASIOSampleType sampleType ) -{ - switch (sampleType) { - case ASIOSTInt16MSB: - case ASIOSTInt16LSB: - return 2; - - case ASIOSTFloat64MSB: - case ASIOSTFloat64LSB: - return 8; - - case ASIOSTFloat32MSB: - case ASIOSTFloat32LSB: - case ASIOSTInt32MSB: - case ASIOSTInt32LSB: - case ASIOSTInt32MSB16: - case ASIOSTInt32LSB16: - case ASIOSTInt32MSB18: - case ASIOSTInt32MSB20: - case ASIOSTInt32MSB24: - case ASIOSTInt32LSB18: - case ASIOSTInt32LSB20: - case ASIOSTInt32LSB24: - return 4; - - case ASIOSTInt24MSB: - case ASIOSTInt24LSB: - return 3; - - 
default: - return 0; - } -} - - -static void Swap16( void *buffer, long shift, long count ) -{ - unsigned short *p = (unsigned short*)buffer; - unsigned short temp; - (void) shift; /* unused parameter */ - - while( count-- ) - { - temp = *p; - *p++ = (unsigned short)((temp<<8) | (temp>>8)); - } -} - -static void Swap24( void *buffer, long shift, long count ) -{ - unsigned char *p = (unsigned char*)buffer; - unsigned char temp; - (void) shift; /* unused parameter */ - - while( count-- ) - { - temp = *p; - *p = *(p+2); - *(p+2) = temp; - p += 3; - } -} - -#define PA_SWAP32_( x ) ((x>>24) | ((x>>8)&0xFF00) | ((x<<8)&0xFF0000) | (x<<24)); - -static void Swap32( void *buffer, long shift, long count ) -{ - unsigned long *p = (unsigned long*)buffer; - unsigned long temp; - (void) shift; /* unused parameter */ - - while( count-- ) - { - temp = *p; - *p++ = PA_SWAP32_( temp); - } -} - -static void SwapShiftLeft32( void *buffer, long shift, long count ) -{ - unsigned long *p = (unsigned long*)buffer; - unsigned long temp; - - while( count-- ) - { - temp = *p; - temp = PA_SWAP32_( temp); - *p++ = temp << shift; - } -} - -static void ShiftRightSwap32( void *buffer, long shift, long count ) -{ - unsigned long *p = (unsigned long*)buffer; - unsigned long temp; - - while( count-- ) - { - temp = *p >> shift; - *p++ = PA_SWAP32_( temp); - } -} - -static void ShiftLeft32( void *buffer, long shift, long count ) -{ - unsigned long *p = (unsigned long*)buffer; - unsigned long temp; - - while( count-- ) - { - temp = *p; - *p++ = temp << shift; - } -} - -static void ShiftRight32( void *buffer, long shift, long count ) -{ - unsigned long *p = (unsigned long*)buffer; - unsigned long temp; - - while( count-- ) - { - temp = *p; - *p++ = temp >> shift; - } -} - -#define PA_SWAP_( x, y ) temp=x; x = y; y = temp; - -static void Swap64ConvertFloat64ToFloat32( void *buffer, long shift, long count ) -{ - double *in = (double*)buffer; - float *out = (float*)buffer; - unsigned char *p; - unsigned char temp; - (void) shift; /* unused parameter */ - - while( count-- ) - { - p = (unsigned char*)in; - PA_SWAP_( p[0], p[7] ); - PA_SWAP_( p[1], p[6] ); - PA_SWAP_( p[2], p[5] ); - PA_SWAP_( p[3], p[4] ); - - *out++ = (float) (*in++); - } -} - -static void ConvertFloat64ToFloat32( void *buffer, long shift, long count ) -{ - double *in = (double*)buffer; - float *out = (float*)buffer; - (void) shift; /* unused parameter */ - - while( count-- ) - *out++ = (float) (*in++); -} - -static void ConvertFloat32ToFloat64Swap64( void *buffer, long shift, long count ) -{ - float *in = ((float*)buffer) + (count-1); - double *out = ((double*)buffer) + (count-1); - unsigned char *p; - unsigned char temp; - (void) shift; /* unused parameter */ - - while( count-- ) - { - *out = *in--; - - p = (unsigned char*)out; - PA_SWAP_( p[0], p[7] ); - PA_SWAP_( p[1], p[6] ); - PA_SWAP_( p[2], p[5] ); - PA_SWAP_( p[3], p[4] ); - - out--; - } -} - -static void ConvertFloat32ToFloat64( void *buffer, long shift, long count ) -{ - float *in = ((float*)buffer) + (count-1); - double *out = ((double*)buffer) + (count-1); - (void) shift; /* unused parameter */ - - while( count-- ) - *out-- = *in--; -} - -#ifdef MAC -#define PA_MSB_IS_NATIVE_ -#undef PA_LSB_IS_NATIVE_ -#endif - -#ifdef WINDOWS -#undef PA_MSB_IS_NATIVE_ -#define PA_LSB_IS_NATIVE_ -#endif - -typedef void PaAsioBufferConverter( void *, long, long ); - -static void SelectAsioToPaConverter( ASIOSampleType type, PaAsioBufferConverter **converter, long *shift ) -{ - *shift = 0; - *converter = 0; - - switch 
(type) { - case ASIOSTInt16MSB: - /* dest: paInt16, no conversion necessary, possible byte swap*/ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap16; - #endif - break; - case ASIOSTInt16LSB: - /* dest: paInt16, no conversion necessary, possible byte swap*/ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap16; - #endif - break; - case ASIOSTFloat32MSB: - /* dest: paFloat32, no conversion necessary, possible byte swap*/ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTFloat32LSB: - /* dest: paFloat32, no conversion necessary, possible byte swap*/ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTFloat64MSB: - /* dest: paFloat32, in-place conversion to/from float32, possible byte swap*/ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap64ConvertFloat64ToFloat32; - #else - *converter = ConvertFloat64ToFloat32; - #endif - break; - case ASIOSTFloat64LSB: - /* dest: paFloat32, in-place conversion to/from float32, possible byte swap*/ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap64ConvertFloat64ToFloat32; - #else - *converter = ConvertFloat64ToFloat32; - #endif - break; - case ASIOSTInt32MSB: - /* dest: paInt32, no conversion necessary, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTInt32LSB: - /* dest: paInt32, no conversion necessary, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTInt32MSB16: - /* dest: paInt32, 16 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 16; - break; - case ASIOSTInt32MSB18: - /* dest: paInt32, 14 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 14; - break; - case ASIOSTInt32MSB20: - /* dest: paInt32, 12 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 12; - break; - case ASIOSTInt32MSB24: - /* dest: paInt32, 8 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 8; - break; - case ASIOSTInt32LSB16: - /* dest: paInt32, 16 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 16; - break; - case ASIOSTInt32LSB18: - /* dest: paInt32, 14 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 14; - break; - case ASIOSTInt32LSB20: - /* dest: paInt32, 12 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 12; - break; - case ASIOSTInt32LSB24: - /* dest: paInt32, 8 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = SwapShiftLeft32; - #else - *converter = ShiftLeft32; - #endif - *shift = 8; - break; - case ASIOSTInt24MSB: - /* dest: paInt24, no conversion necessary, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap24; - #endif - break; - case ASIOSTInt24LSB: - /* dest: paInt24, no conversion necessary, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap24; - #endif - break; - } -} - - -static void SelectPaToAsioConverter( ASIOSampleType type, PaAsioBufferConverter **converter, long *shift ) -{ 
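    /* Counterpart of SelectAsioToPaConverter() above: selects the in-place
       conversion (and bit shift) applied to PortAudio output data before it is
       handed back to the driver. Usage sketch with illustrative variable names
       only (OpenStream() stores the result in stream->outputBufferConverter
       and stream->outputShift):

           PaAsioBufferConverter *convert;
           long shift;
           SelectPaToAsioConverter( channelInfo.type, &convert, &shift );
           if( convert )
               convert( asioOutputBuffer, shift, framesPerHostBuffer );
    */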
- *shift = 0; - *converter = 0; - - switch (type) { - case ASIOSTInt16MSB: - /* src: paInt16, no conversion necessary, possible byte swap*/ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap16; - #endif - break; - case ASIOSTInt16LSB: - /* src: paInt16, no conversion necessary, possible byte swap*/ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap16; - #endif - break; - case ASIOSTFloat32MSB: - /* src: paFloat32, no conversion necessary, possible byte swap*/ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTFloat32LSB: - /* src: paFloat32, no conversion necessary, possible byte swap*/ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTFloat64MSB: - /* src: paFloat32, in-place conversion to/from float32, possible byte swap*/ - #ifdef PA_LSB_IS_NATIVE_ - *converter = ConvertFloat32ToFloat64Swap64; - #else - *converter = ConvertFloat32ToFloat64; - #endif - break; - case ASIOSTFloat64LSB: - /* src: paFloat32, in-place conversion to/from float32, possible byte swap*/ - #ifdef PA_MSB_IS_NATIVE_ - *converter = ConvertFloat32ToFloat64Swap64; - #else - *converter = ConvertFloat32ToFloat64; - #endif - break; - case ASIOSTInt32MSB: - /* src: paInt32, no conversion necessary, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTInt32LSB: - /* src: paInt32, no conversion necessary, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap32; - #endif - break; - case ASIOSTInt32MSB16: - /* src: paInt32, 16 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 16; - break; - case ASIOSTInt32MSB18: - /* src: paInt32, 14 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 14; - break; - case ASIOSTInt32MSB20: - /* src: paInt32, 12 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 12; - break; - case ASIOSTInt32MSB24: - /* src: paInt32, 8 bit shift, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 8; - break; - case ASIOSTInt32LSB16: - /* src: paInt32, 16 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 16; - break; - case ASIOSTInt32LSB18: - /* src: paInt32, 14 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 14; - break; - case ASIOSTInt32LSB20: - /* src: paInt32, 12 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 12; - break; - case ASIOSTInt32LSB24: - /* src: paInt32, 8 bit shift, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = ShiftRightSwap32; - #else - *converter = ShiftRight32; - #endif - *shift = 8; - break; - case ASIOSTInt24MSB: - /* src: paInt24, no conversion necessary, possible byte swap */ - #ifdef PA_LSB_IS_NATIVE_ - *converter = Swap24; - #endif - break; - case ASIOSTInt24LSB: - /* src: paInt24, no conversion necessary, possible byte swap */ - #ifdef PA_MSB_IS_NATIVE_ - *converter = Swap24; - #endif - break; - } -} - - -typedef struct PaAsioDeviceInfo -{ - PaDeviceInfo commonDeviceInfo; 
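    /* Buffer size constraints reported by ASIOGetBufferSize() for this driver,
       cached when the device list is built: */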
- long minBufferSize; - long maxBufferSize; - long preferredBufferSize; - long bufferGranularity; - - ASIOChannelInfo *asioChannelInfos; -} -PaAsioDeviceInfo; - - -PaError PaAsio_GetAvailableBufferSizes( PaDeviceIndex device, - long *minBufferSizeFrames, long *maxBufferSizeFrames, long *preferredBufferSizeFrames, long *granularity ) -{ - PaError result; - PaUtilHostApiRepresentation *hostApi; - PaDeviceIndex hostApiDevice; - - result = PaUtil_GetHostApiRepresentation( &hostApi, paASIO ); - - if( result == paNoError ) - { - result = PaUtil_DeviceIndexToHostApiDeviceIndex( &hostApiDevice, device, hostApi ); - - if( result == paNoError ) - { - PaAsioDeviceInfo *asioDeviceInfo = - (PaAsioDeviceInfo*)hostApi->deviceInfos[hostApiDevice]; - - *minBufferSizeFrames = asioDeviceInfo->minBufferSize; - *maxBufferSizeFrames = asioDeviceInfo->maxBufferSize; - *preferredBufferSizeFrames = asioDeviceInfo->preferredBufferSize; - *granularity = asioDeviceInfo->bufferGranularity; - } - } - - return result; -} - -/* Unload whatever we loaded in LoadAsioDriver(). -*/ -static void UnloadAsioDriver( void ) -{ - ASIOExit(); -} - -/* - load the asio driver named by and return statistics about - the driver in info. If no error occurred, the driver will remain open - and must be closed by the called by calling UnloadAsioDriver() - if an error - is returned the driver will already be unloaded. -*/ -static PaError LoadAsioDriver( PaAsioHostApiRepresentation *asioHostApi, const char *driverName, - PaAsioDriverInfo *driverInfo, void *systemSpecific ) -{ - PaError result = paNoError; - ASIOError asioError; - int asioIsInitialized = 0; - - if( !asioHostApi->asioDrivers->loadDriver( const_cast(driverName) ) ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_HOST_ERROR( 0, "Failed to load ASIO driver" ); - goto error; - } - - memset( &driverInfo->asioDriverInfo, 0, sizeof(ASIODriverInfo) ); - driverInfo->asioDriverInfo.asioVersion = 2; - driverInfo->asioDriverInfo.sysRef = systemSpecific; - if( (asioError = ASIOInit( &driverInfo->asioDriverInfo )) != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - goto error; - } - else - { - asioIsInitialized = 1; - } - - if( (asioError = ASIOGetChannels(&driverInfo->inputChannelCount, - &driverInfo->outputChannelCount)) != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - goto error; - } - - if( (asioError = ASIOGetBufferSize(&driverInfo->bufferMinSize, - &driverInfo->bufferMaxSize, &driverInfo->bufferPreferredSize, - &driverInfo->bufferGranularity)) != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - goto error; - } - - if( ASIOOutputReady() == ASE_OK ) - driverInfo->postOutput = true; - else - driverInfo->postOutput = false; - - return result; - -error: - if( asioIsInitialized ) - { - ASIOExit(); - } - - return result; -} - - -#define PA_DEFAULTSAMPLERATESEARCHORDER_COUNT_ 13 /* must be the same number of elements as in the array below */ -static ASIOSampleRate defaultSampleRateSearchOrder_[] - = {44100.0, 48000.0, 32000.0, 24000.0, 22050.0, 88200.0, 96000.0, - 192000.0, 16000.0, 12000.0, 11025.0, 9600.0, 8000.0 }; - - -static PaError InitPaDeviceInfoFromAsioDriver( PaAsioHostApiRepresentation *asioHostApi, - const char *driverName, int driverIndex, - PaDeviceInfo *deviceInfo, PaAsioDeviceInfo *asioDeviceInfo ) -{ - PaError result = paNoError; - - /* Due to the headless design of the ASIO API, drivers are free to write over data given to 
them (like M-Audio - drivers f.i.). This is an attempt to overcome that. */ - union _tag_local { - PaAsioDriverInfo info; - char _padding[4096]; - } paAsioDriver; - - asioDeviceInfo->asioChannelInfos = 0; /* we check this below to handle error cleanup */ - - result = LoadAsioDriver( asioHostApi, driverName, &paAsioDriver.info, asioHostApi->systemSpecific ); - if( result == paNoError ) - { - PA_DEBUG(("PaAsio_Initialize: drv:%d name = %s\n", driverIndex,deviceInfo->name)); - PA_DEBUG(("PaAsio_Initialize: drv:%d inputChannels = %d\n", driverIndex, paAsioDriver.info.inputChannelCount)); - PA_DEBUG(("PaAsio_Initialize: drv:%d outputChannels = %d\n", driverIndex, paAsioDriver.info.outputChannelCount)); - PA_DEBUG(("PaAsio_Initialize: drv:%d bufferMinSize = %d\n", driverIndex, paAsioDriver.info.bufferMinSize)); - PA_DEBUG(("PaAsio_Initialize: drv:%d bufferMaxSize = %d\n", driverIndex, paAsioDriver.info.bufferMaxSize)); - PA_DEBUG(("PaAsio_Initialize: drv:%d bufferPreferredSize = %d\n", driverIndex, paAsioDriver.info.bufferPreferredSize)); - PA_DEBUG(("PaAsio_Initialize: drv:%d bufferGranularity = %d\n", driverIndex, paAsioDriver.info.bufferGranularity)); - - deviceInfo->maxInputChannels = paAsioDriver.info.inputChannelCount; - deviceInfo->maxOutputChannels = paAsioDriver.info.outputChannelCount; - - deviceInfo->defaultSampleRate = 0.; - bool foundDefaultSampleRate = false; - for( int j=0; j < PA_DEFAULTSAMPLERATESEARCHORDER_COUNT_; ++j ) - { - ASIOError asioError = ASIOCanSampleRate( defaultSampleRateSearchOrder_[j] ); - if( asioError != ASE_NoClock && asioError != ASE_NotPresent ) - { - deviceInfo->defaultSampleRate = defaultSampleRateSearchOrder_[j]; - foundDefaultSampleRate = true; - break; - } - } - - PA_DEBUG(("PaAsio_Initialize: drv:%d defaultSampleRate = %f\n", driverIndex, deviceInfo->defaultSampleRate)); - - if( foundDefaultSampleRate ){ - - /* calculate default latency values from bufferPreferredSize - for default low latency, and bufferMaxSize - for default high latency. - use the default sample rate to convert from samples to - seconds. Without knowing what sample rate the user will - use this is the best we can do. 
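           As a purely illustrative example: with a preferred buffer size of
           256 frames and a default sample rate of 44100 Hz the default low
           latency comes out at 256 / 44100 ~= 5.8 ms, and with a maximum
           buffer size of 1024 frames the default high latency is
           1024 / 44100 ~= 23.2 ms.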
- */ - - double defaultLowLatency = - paAsioDriver.info.bufferPreferredSize / deviceInfo->defaultSampleRate; - - deviceInfo->defaultLowInputLatency = defaultLowLatency; - deviceInfo->defaultLowOutputLatency = defaultLowLatency; - - double defaultHighLatency = - paAsioDriver.info.bufferMaxSize / deviceInfo->defaultSampleRate; - - if( defaultHighLatency < defaultLowLatency ) - defaultHighLatency = defaultLowLatency; /* just in case the driver returns something strange */ - - deviceInfo->defaultHighInputLatency = defaultHighLatency; - deviceInfo->defaultHighOutputLatency = defaultHighLatency; - - }else{ - - deviceInfo->defaultLowInputLatency = 0.; - deviceInfo->defaultLowOutputLatency = 0.; - deviceInfo->defaultHighInputLatency = 0.; - deviceInfo->defaultHighOutputLatency = 0.; - } - - PA_DEBUG(("PaAsio_Initialize: drv:%d defaultLowInputLatency = %f\n", driverIndex, deviceInfo->defaultLowInputLatency)); - PA_DEBUG(("PaAsio_Initialize: drv:%d defaultLowOutputLatency = %f\n", driverIndex, deviceInfo->defaultLowOutputLatency)); - PA_DEBUG(("PaAsio_Initialize: drv:%d defaultHighInputLatency = %f\n", driverIndex, deviceInfo->defaultHighInputLatency)); - PA_DEBUG(("PaAsio_Initialize: drv:%d defaultHighOutputLatency = %f\n", driverIndex, deviceInfo->defaultHighOutputLatency)); - - asioDeviceInfo->minBufferSize = paAsioDriver.info.bufferMinSize; - asioDeviceInfo->maxBufferSize = paAsioDriver.info.bufferMaxSize; - asioDeviceInfo->preferredBufferSize = paAsioDriver.info.bufferPreferredSize; - asioDeviceInfo->bufferGranularity = paAsioDriver.info.bufferGranularity; - - - asioDeviceInfo->asioChannelInfos = (ASIOChannelInfo*)PaUtil_GroupAllocateMemory( - asioHostApi->allocations, - sizeof(ASIOChannelInfo) * (deviceInfo->maxInputChannels - + deviceInfo->maxOutputChannels) ); - if( !asioDeviceInfo->asioChannelInfos ) - { - result = paInsufficientMemory; - goto error_unload; - } - - int a; - - for( a=0; a < deviceInfo->maxInputChannels; ++a ){ - asioDeviceInfo->asioChannelInfos[a].channel = a; - asioDeviceInfo->asioChannelInfos[a].isInput = ASIOTrue; - ASIOError asioError = ASIOGetChannelInfo( &asioDeviceInfo->asioChannelInfos[a] ); - if( asioError != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - goto error_unload; - } - } - - for( a=0; a < deviceInfo->maxOutputChannels; ++a ){ - int b = deviceInfo->maxInputChannels + a; - asioDeviceInfo->asioChannelInfos[b].channel = a; - asioDeviceInfo->asioChannelInfos[b].isInput = ASIOFalse; - ASIOError asioError = ASIOGetChannelInfo( &asioDeviceInfo->asioChannelInfos[b] ); - if( asioError != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - goto error_unload; - } - } - - /* unload the driver */ - UnloadAsioDriver(); - } - - return result; - -error_unload: - UnloadAsioDriver(); - - if( asioDeviceInfo->asioChannelInfos ){ - PaUtil_GroupFreeMemory( asioHostApi->allocations, asioDeviceInfo->asioChannelInfos ); - asioDeviceInfo->asioChannelInfos = 0; - } - - return result; -} - - -/* we look up IsDebuggerPresent at runtime incase it isn't present (on Win95 for example) */ -typedef BOOL (WINAPI *IsDebuggerPresentPtr)(VOID); -IsDebuggerPresentPtr IsDebuggerPresent_ = 0; -//FARPROC IsDebuggerPresent_ = 0; // this is the current way to do it apparently according to davidv - -PaError PaAsio_Initialize( PaUtilHostApiRepresentation **hostApi, PaHostApiIndex hostApiIndex ) -{ - PaError result = paNoError; - int i, driverCount; - PaAsioHostApiRepresentation *asioHostApi; - PaAsioDeviceInfo 
*deviceInfoArray; - char **names; - asioHostApi = (PaAsioHostApiRepresentation*)PaUtil_AllocateMemory( sizeof(PaAsioHostApiRepresentation) ); - if( !asioHostApi ) - { - result = paInsufficientMemory; - goto error; - } - - memset( asioHostApi, 0, sizeof(PaAsioHostApiRepresentation) ); /* ensure all fields are zeroed. especially asioHostApi->allocations */ - - /* - We initialize COM ourselves here and uninitialize it in Terminate(). - This should be the only COM initialization needed in this module. - - The ASIO SDK may also initialize COM but since we want to reduce dependency - on the ASIO SDK we manage COM initialization ourselves. - - There used to be code that initialized COM in other situations - such as when creating a Stream. This made PA work when calling Pa_CreateStream - from a non-main thread. However we currently consider initialization - of COM in non-main threads to be the caller's responsibility. - */ - result = PaWinUtil_CoInitialize( paASIO, &asioHostApi->comInitializationResult ); - if( result != paNoError ) - { - goto error; - } - - asioHostApi->asioDrivers = 0; /* avoid surprises in our error handler below */ - - asioHostApi->allocations = PaUtil_CreateAllocationGroup(); - if( !asioHostApi->allocations ) - { - result = paInsufficientMemory; - goto error; - } - - /* Allocate the AsioDrivers() driver list (class from ASIO SDK) */ - try - { - asioHostApi->asioDrivers = new AsioDrivers(); /* invokes CoInitialize(0) in AsioDriverList::AsioDriverList */ - } - catch (std::bad_alloc) - { - asioHostApi->asioDrivers = 0; - } - /* some implementations of new (ie MSVC, see http://support.microsoft.com/?kbid=167733) - don't throw std::bad_alloc, so we also explicitly test for a null return. */ - if( asioHostApi->asioDrivers == 0 ) - { - result = paInsufficientMemory; - goto error; - } - - asioDrivers = asioHostApi->asioDrivers; /* keep SDK global in sync until we stop depending on it */ - - asioHostApi->systemSpecific = 0; - asioHostApi->openAsioDeviceIndex = paNoDevice; - - *hostApi = &asioHostApi->inheritedHostApiRep; - (*hostApi)->info.structVersion = 1; - - (*hostApi)->info.type = paASIO; - (*hostApi)->info.name = "ASIO"; - (*hostApi)->info.deviceCount = 0; - - #ifdef WINDOWS - /* use desktop window as system specific ptr */ - asioHostApi->systemSpecific = GetDesktopWindow(); - #endif - - /* driverCount is the number of installed drivers - not necessarily - the number of installed physical devices. 
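       Note that the final PortAudio device count can be lower than
       driverCount: blacklisted wrapper drivers and drivers that fail to
       initialize are skipped in the loop below.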
*/ - #if MAC - driverCount = asioHostApi->asioDrivers->getNumFragments(); - #elif WINDOWS - driverCount = asioHostApi->asioDrivers->asioGetNumDev(); - #endif - - if( driverCount > 0 ) - { - names = GetAsioDriverNames( asioHostApi, asioHostApi->allocations, driverCount ); - if( !names ) - { - result = paInsufficientMemory; - goto error; - } - - - /* allocate enough space for all drivers, even if some aren't installed */ - - (*hostApi)->deviceInfos = (PaDeviceInfo**)PaUtil_GroupAllocateMemory( - asioHostApi->allocations, sizeof(PaDeviceInfo*) * driverCount ); - if( !(*hostApi)->deviceInfos ) - { - result = paInsufficientMemory; - goto error; - } - - /* allocate all device info structs in a contiguous block */ - deviceInfoArray = (PaAsioDeviceInfo*)PaUtil_GroupAllocateMemory( - asioHostApi->allocations, sizeof(PaAsioDeviceInfo) * driverCount ); - if( !deviceInfoArray ) - { - result = paInsufficientMemory; - goto error; - } - - IsDebuggerPresent_ = (IsDebuggerPresentPtr)GetProcAddress( LoadLibraryA( "Kernel32.dll" ), "IsDebuggerPresent" ); - - for( i=0; i < driverCount; ++i ) - { - PA_DEBUG(("ASIO names[%d]:%s\n",i,names[i])); - - // Since portaudio opens ALL ASIO drivers, and no one else does that, - // we face fact that some drivers were not meant for it, drivers which act - // like shells on top of REAL drivers, for instance. - // so we get duplicate handles, locks and other problems. - // so lets NOT try to load any such wrappers. - // The ones i [davidv] know of so far are: - - if ( strcmp (names[i],"ASIO DirectX Full Duplex Driver") == 0 - || strcmp (names[i],"ASIO Multimedia Driver") == 0 - || strncmp(names[i],"Premiere",8) == 0 //"Premiere Elements Windows Sound 1.0" - || strncmp(names[i],"Adobe",5) == 0 //"Adobe Default Windows Sound 1.5" - ) - { - PA_DEBUG(("BLACKLISTED!!!\n")); - continue; - } - - - if( IsDebuggerPresent_ && IsDebuggerPresent_() ) - { - /* ASIO Digidesign Driver uses PACE copy protection which quits out - if a debugger is running. So we don't load it if a debugger is running. */ - if( strcmp(names[i], "ASIO Digidesign Driver") == 0 ) - { - PA_DEBUG(("BLACKLISTED!!! ASIO Digidesign Driver would quit the debugger\n")); - continue; - } - } - - - /* Attempt to init device info from the asio driver... 
*/ - { - PaAsioDeviceInfo *asioDeviceInfo = &deviceInfoArray[ (*hostApi)->info.deviceCount ]; - PaDeviceInfo *deviceInfo = &asioDeviceInfo->commonDeviceInfo; - - deviceInfo->structVersion = 2; - deviceInfo->hostApi = hostApiIndex; - - deviceInfo->name = names[i]; - - if( InitPaDeviceInfoFromAsioDriver( asioHostApi, names[i], i, deviceInfo, asioDeviceInfo ) == paNoError ) - { - (*hostApi)->deviceInfos[ (*hostApi)->info.deviceCount ] = deviceInfo; - ++(*hostApi)->info.deviceCount; - } - else - { - PA_DEBUG(("Skipping ASIO device:%s\n",names[i])); - continue; - } - } - } - } - - if( (*hostApi)->info.deviceCount > 0 ) - { - (*hostApi)->info.defaultInputDevice = 0; - (*hostApi)->info.defaultOutputDevice = 0; - } - else - { - (*hostApi)->info.defaultInputDevice = paNoDevice; - (*hostApi)->info.defaultOutputDevice = paNoDevice; - } - - - (*hostApi)->Terminate = Terminate; - (*hostApi)->OpenStream = OpenStream; - (*hostApi)->IsFormatSupported = IsFormatSupported; - - PaUtil_InitializeStreamInterface( &asioHostApi->callbackStreamInterface, CloseStream, StartStream, - StopStream, AbortStream, IsStreamStopped, IsStreamActive, - GetStreamTime, GetStreamCpuLoad, - PaUtil_DummyRead, PaUtil_DummyWrite, - PaUtil_DummyGetReadAvailable, PaUtil_DummyGetWriteAvailable ); - - PaUtil_InitializeStreamInterface( &asioHostApi->blockingStreamInterface, CloseStream, StartStream, - StopStream, AbortStream, IsStreamStopped, IsStreamActive, - GetStreamTime, PaUtil_DummyGetCpuLoad, - ReadStream, WriteStream, GetStreamReadAvailable, GetStreamWriteAvailable ); - - return result; - -error: - if( asioHostApi ) - { - if( asioHostApi->allocations ) - { - PaUtil_FreeAllAllocations( asioHostApi->allocations ); - PaUtil_DestroyAllocationGroup( asioHostApi->allocations ); - } - - delete asioHostApi->asioDrivers; - asioDrivers = 0; /* keep SDK global in sync until we stop depending on it */ - - PaWinUtil_CoUninitialize( paASIO, &asioHostApi->comInitializationResult ); - - PaUtil_FreeMemory( asioHostApi ); - } - - return result; -} - - -static void Terminate( struct PaUtilHostApiRepresentation *hostApi ) -{ - PaAsioHostApiRepresentation *asioHostApi = (PaAsioHostApiRepresentation*)hostApi; - - /* - IMPLEMENT ME: - - clean up any resources not handled by the allocation group (need to review if there are any) - */ - - if( asioHostApi->allocations ) - { - PaUtil_FreeAllAllocations( asioHostApi->allocations ); - PaUtil_DestroyAllocationGroup( asioHostApi->allocations ); - } - - delete asioHostApi->asioDrivers; - asioDrivers = 0; /* keep SDK global in sync until we stop depending on it */ - - PaWinUtil_CoUninitialize( paASIO, &asioHostApi->comInitializationResult ); - - PaUtil_FreeMemory( asioHostApi ); -} - - -static PaError IsFormatSupported( struct PaUtilHostApiRepresentation *hostApi, - const PaStreamParameters *inputParameters, - const PaStreamParameters *outputParameters, - double sampleRate ) -{ - PaError result = paNoError; - PaAsioHostApiRepresentation *asioHostApi = (PaAsioHostApiRepresentation*)hostApi; - PaAsioDriverInfo *driverInfo = &asioHostApi->openAsioDriverInfo; - int inputChannelCount, outputChannelCount; - PaSampleFormat inputSampleFormat, outputSampleFormat; - PaDeviceIndex asioDeviceIndex; - ASIOError asioError; - - if( inputParameters && outputParameters ) - { - /* full duplex ASIO stream must use the same device for input and output */ - - if( inputParameters->device != outputParameters->device ) - return paBadIODeviceCombination; - } - - if( inputParameters ) - { - inputChannelCount = 
inputParameters->channelCount; - inputSampleFormat = inputParameters->sampleFormat; - - /* all standard sample formats are supported by the buffer adapter, - this implementation doesn't support any custom sample formats */ - if( inputSampleFormat & paCustomFormat ) - return paSampleFormatNotSupported; - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - - if( inputParameters->device == paUseHostApiSpecificDeviceSpecification ) - return paInvalidDevice; - - asioDeviceIndex = inputParameters->device; - - /* validate inputStreamInfo */ - /** @todo do more validation here */ - // if( inputParameters->hostApiSpecificStreamInfo ) - // return paIncompatibleHostApiSpecificStreamInfo; /* this implementation doesn't use custom stream info */ - } - else - { - inputChannelCount = 0; - } - - if( outputParameters ) - { - outputChannelCount = outputParameters->channelCount; - outputSampleFormat = outputParameters->sampleFormat; - - /* all standard sample formats are supported by the buffer adapter, - this implementation doesn't support any custom sample formats */ - if( outputSampleFormat & paCustomFormat ) - return paSampleFormatNotSupported; - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - - if( outputParameters->device == paUseHostApiSpecificDeviceSpecification ) - return paInvalidDevice; - - asioDeviceIndex = outputParameters->device; - - /* validate outputStreamInfo */ - /** @todo do more validation here */ - // if( outputParameters->hostApiSpecificStreamInfo ) - // return paIncompatibleHostApiSpecificStreamInfo; /* this implementation doesn't use custom stream info */ - } - else - { - outputChannelCount = 0; - } - - - - /* if an ASIO device is open we can only get format information for the currently open device */ - - if( asioHostApi->openAsioDeviceIndex != paNoDevice - && asioHostApi->openAsioDeviceIndex != asioDeviceIndex ) - { - return paDeviceUnavailable; - } - - - /* NOTE: we load the driver and use its current settings - rather than the ones in our device info structure which may be stale */ - - /* open the device if it's not already open */ - if( asioHostApi->openAsioDeviceIndex == paNoDevice ) - { - result = LoadAsioDriver( asioHostApi, asioHostApi->inheritedHostApiRep.deviceInfos[ asioDeviceIndex ]->name, - driverInfo, asioHostApi->systemSpecific ); - if( result != paNoError ) - return result; - } - - /* check that input device can support inputChannelCount */ - if( inputChannelCount > 0 ) - { - if( inputChannelCount > driverInfo->inputChannelCount ) - { - result = paInvalidChannelCount; - goto done; - } - } - - /* check that output device can support outputChannelCount */ - if( outputChannelCount ) - { - if( outputChannelCount > driverInfo->outputChannelCount ) - { - result = paInvalidChannelCount; - goto done; - } - } - - /* query for sample rate support */ - asioError = ASIOCanSampleRate( sampleRate ); - if( asioError == ASE_NoClock || asioError == ASE_NotPresent ) - { - result = paInvalidSampleRate; - goto done; - } - -done: - /* close the device if it wasn't already open */ - if( asioHostApi->openAsioDeviceIndex == paNoDevice ) - { - UnloadAsioDriver(); /* not sure if we should check for errors here */ - } - - if( result == paNoError ) - return paFormatIsSupported; - else - return result; -} - - - -/** A data structure specifically for storing blocking i/o related data. 
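    Blocking reads and writes are serviced by the regular ASIO callback: it
    moves audio between the ASIO host buffers and the read/write ring buffers
    below, and signals the ready events once the amounts requested by
    ReadStream() / WriteStream() have become available.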
*/ -typedef struct PaAsioStreamBlockingState -{ - int stopFlag; /**< Flag indicating that block processing is to be stopped. */ - - unsigned long writeBuffersRequested; /**< The number of available output buffers, requested by the #WriteStream() function. */ - unsigned long readFramesRequested; /**< The number of available input frames, requested by the #ReadStream() function. */ - - int writeBuffersRequestedFlag; /**< Flag to indicate that #WriteStream() has requested more output buffers to be available. */ - int readFramesRequestedFlag; /**< Flag to indicate that #ReadStream() requires more input frames to be available. */ - - HANDLE writeBuffersReadyEvent; /**< Event to signal that requested output buffers are available. */ - HANDLE readFramesReadyEvent; /**< Event to signal that requested input frames are available. */ - - void *writeRingBufferData; /**< The actual ring buffer memory, used by the output ring buffer. */ - void *readRingBufferData; /**< The actual ring buffer memory, used by the input ring buffer. */ - - PaUtilRingBuffer writeRingBuffer; /**< Frame-aligned blocking i/o ring buffer to store output data (interleaved user format). */ - PaUtilRingBuffer readRingBuffer; /**< Frame-aligned blocking i/o ring buffer to store input data (interleaved user format). */ - - long writeRingBufferInitialFrames; /**< The initial number of silent frames within the output ring buffer. */ - - const void **writeStreamBuffer; /**< Temp buffer, used by #WriteStream() for handling non-interleaved data. */ - void **readStreamBuffer; /**< Temp buffer, used by #ReadStream() for handling non-interleaved data. */ - - PaUtilBufferProcessor bufferProcessor; /**< Buffer processor, used to handle the blocking i/o ring buffers. */ - - int outputUnderflowFlag; /**< Flag to signal an output underflow from within the callback function. */ - int inputOverflowFlag; /**< Flag to signal an input overflow from within the callback function. */ -} -PaAsioStreamBlockingState; - - - -/* PaAsioStream - a stream data structure specifically for this implementation */ - -typedef struct PaAsioStream -{ - PaUtilStreamRepresentation streamRepresentation; - PaUtilCpuLoadMeasurer cpuLoadMeasurer; - PaUtilBufferProcessor bufferProcessor; - - PaAsioHostApiRepresentation *asioHostApi; - unsigned long framesPerHostCallback; - - /* ASIO driver info - these may not be needed for the life of the stream, - but store them here until we work out how format conversion is going - to work. */ - - ASIOBufferInfo *asioBufferInfos; - ASIOChannelInfo *asioChannelInfos; - long asioInputLatencyFrames, asioOutputLatencyFrames; // actual latencies returned by asio - - long inputChannelCount, outputChannelCount; - bool postOutput; - - void **bufferPtrs; /* this is carved up for inputBufferPtrs and outputBufferPtrs */ - void **inputBufferPtrs[2]; - void **outputBufferPtrs[2]; - - PaAsioBufferConverter *inputBufferConverter; - long inputShift; - PaAsioBufferConverter *outputBufferConverter; - long outputShift; - - volatile bool stopProcessing; - int stopPlayoutCount; - HANDLE completedBuffersPlayedEvent; - - bool streamFinishedCallbackCalled; - int isStopped; - volatile int isActive; - volatile bool zeroOutput; /* all future calls to the callback will output silence */ - - volatile long reenterCount; - volatile long reenterError; - - PaStreamCallbackFlags callbackFlags; - - PaAsioStreamBlockingState *blockingState; /**< Blocking i/o data struct, or NULL when using callback interface. 
*/ -} -PaAsioStream; - -static PaAsioStream *theAsioStream = 0; /* due to ASIO sdk limitations there can be only one stream */ - - -static void ZeroOutputBuffers( PaAsioStream *stream, long index ) -{ - int i; - - for( i=0; i < stream->outputChannelCount; ++i ) - { - void *buffer = stream->asioBufferInfos[ i + stream->inputChannelCount ].buffers[index]; - - int bytesPerSample = BytesPerAsioSample( stream->asioChannelInfos[ i + stream->inputChannelCount ].type ); - - memset( buffer, 0, stream->framesPerHostCallback * bytesPerSample ); - } -} - - -/* return the next power of two >= x. - Returns the input parameter if it is already a power of two. - http://stackoverflow.com/questions/364985/algorithm-for-finding-the-smallest-power-of-two-thats-greater-or-equal-to-a-giv -*/ -static unsigned long NextPowerOfTwo( unsigned long x ) -{ - --x; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - /* If you needed to deal with numbers > 2^32 the following would be needed. - For latencies, we don't deal with values this large. - x |= x >> 16; - */ - - return x + 1; -} - - -static unsigned long SelectHostBufferSizeForUnspecifiedUserFramesPerBuffer( - unsigned long targetBufferingLatencyFrames, PaAsioDriverInfo *driverInfo ) -{ - /* Choose a host buffer size based only on targetBufferingLatencyFrames and the - device's supported buffer sizes. Always returns a valid value. - */ - - unsigned long result; - - if( targetBufferingLatencyFrames <= (unsigned long)driverInfo->bufferMinSize ) - { - result = driverInfo->bufferMinSize; - } - else if( targetBufferingLatencyFrames >= (unsigned long)driverInfo->bufferMaxSize ) - { - result = driverInfo->bufferMaxSize; - } - else - { - if( driverInfo->bufferGranularity == 0 ) /* single fixed host buffer size */ - { - /* The documentation states that bufferGranularity should be zero - when bufferMinSize, bufferMaxSize and bufferPreferredSize are the - same. We assume that is the case. - */ - - result = driverInfo->bufferPreferredSize; - } - else if( driverInfo->bufferGranularity == -1 ) /* power-of-two */ - { - /* We assume bufferMinSize and bufferMaxSize are powers of two. */ - - result = NextPowerOfTwo( targetBufferingLatencyFrames ); - - if( result < (unsigned long)driverInfo->bufferMinSize ) - result = driverInfo->bufferMinSize; - - if( result > (unsigned long)driverInfo->bufferMaxSize ) - result = driverInfo->bufferMaxSize; - } - else /* modulo bufferGranularity */ - { - /* round up to the next multiple of granularity */ - unsigned long n = (targetBufferingLatencyFrames + driverInfo->bufferGranularity - 1) - / driverInfo->bufferGranularity; - - result = n * driverInfo->bufferGranularity; - - if( result < (unsigned long)driverInfo->bufferMinSize ) - result = driverInfo->bufferMinSize; - - if( result > (unsigned long)driverInfo->bufferMaxSize ) - result = driverInfo->bufferMaxSize; - } - } - - return result; -} - - -static unsigned long SelectHostBufferSizeForSpecifiedUserFramesPerBuffer( - unsigned long targetBufferingLatencyFrames, unsigned long userFramesPerBuffer, - PaAsioDriverInfo *driverInfo ) -{ - /* Select a host buffer size conforming to targetBufferingLatencyFrames - and the device's supported buffer sizes. - The return value will always be a multiple of userFramesPerBuffer. - If a valid buffer size can not be found the function returns 0. - - The current implementation uses a simple iterative search for clarity. - Feel free to suggest a closed form solution. 
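    Worked example (illustrative values only): with bufferMinSize = 64,
    bufferMaxSize = 2048, bufferGranularity = 64, userFramesPerBuffer = 100
    and targetBufferingLatencyFrames = 256, the candidates are the multiples
    of 64 in [64, 2048]; the first candidate that is also a multiple of 100
    is 1600, which is >= 256, so 1600 is returned.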
- */ - unsigned long result = 0; - - assert( userFramesPerBuffer != 0 ); - - if( driverInfo->bufferGranularity == 0 ) /* single fixed host buffer size */ - { - /* The documentation states that bufferGranularity should be zero - when bufferMinSize, bufferMaxSize and bufferPreferredSize are the - same. We assume that is the case. - */ - - if( (driverInfo->bufferPreferredSize % userFramesPerBuffer) == 0 ) - result = driverInfo->bufferPreferredSize; - } - else if( driverInfo->bufferGranularity == -1 ) /* power-of-two */ - { - /* We assume bufferMinSize and bufferMaxSize are powers of two. */ - - /* Search all powers of two in the range [bufferMinSize,bufferMaxSize] - for multiples of userFramesPerBuffer. We prefer the first multiple - that is equal or greater than targetBufferingLatencyFrames, or - failing that, the largest multiple less than - targetBufferingLatencyFrames. - */ - unsigned long x = (unsigned long)driverInfo->bufferMinSize; - do { - if( (x % userFramesPerBuffer) == 0 ) - { - /* any multiple of userFramesPerBuffer is acceptable */ - result = x; - if( result >= targetBufferingLatencyFrames ) - break; /* stop. a value >= to targetBufferingLatencyFrames is ideal. */ - } - - x *= 2; - } while( x <= (unsigned long)driverInfo->bufferMaxSize ); - } - else /* modulo granularity */ - { - /* We assume bufferMinSize is a multiple of bufferGranularity. */ - - /* Search all multiples of bufferGranularity in the range - [bufferMinSize,bufferMaxSize] for multiples of userFramesPerBuffer. - We prefer the first multiple that is equal or greater than - targetBufferingLatencyFrames, or failing that, the largest multiple - less than targetBufferingLatencyFrames. - */ - unsigned long x = (unsigned long)driverInfo->bufferMinSize; - do { - if( (x % userFramesPerBuffer) == 0 ) - { - /* any multiple of userFramesPerBuffer is acceptable */ - result = x; - if( result >= targetBufferingLatencyFrames ) - break; /* stop. a value >= to targetBufferingLatencyFrames is ideal. */ - } - - x += driverInfo->bufferGranularity; - } while( x <= (unsigned long)driverInfo->bufferMaxSize ); - } - - return result; -} - - -static unsigned long SelectHostBufferSize( - unsigned long targetBufferingLatencyFrames, - unsigned long userFramesPerBuffer, PaAsioDriverInfo *driverInfo ) -{ - unsigned long result = 0; - - /* We select a host buffer size based on the following requirements - (in priority order): - - 1. The host buffer size must be permissible according to the ASIO - driverInfo buffer size constraints (min, max, granularity or - powers-of-two). - - 2. If the user specifies a non-zero framesPerBuffer parameter - (userFramesPerBuffer here) the host buffer should be a multiple of - this (subject to the constraints in (1) above). - - [NOTE: Where no permissible host buffer size is a multiple of - userFramesPerBuffer, we choose a value as if userFramesPerBuffer were - zero (i.e. we ignore it). This strategy is open for review ~ perhaps - there are still "more optimal" buffer sizes related to - userFramesPerBuffer that we could use.] - - 3. The host buffer size should be greater than or equal to - targetBufferingLatencyFrames, subject to (1) and (2) above. Where it - is not possible to select a host buffer size equal or greater than - targetBufferingLatencyFrames, the highest buffer size conforming to - (1) and (2) should be chosen. 
- */ - - if( userFramesPerBuffer != 0 ) - { - /* userFramesPerBuffer is specified, try to find a buffer size that's - a multiple of it */ - result = SelectHostBufferSizeForSpecifiedUserFramesPerBuffer( - targetBufferingLatencyFrames, userFramesPerBuffer, driverInfo ); - } - - if( result == 0 ) - { - /* either userFramesPerBuffer was not specified, or we couldn't find a - host buffer size that is a multiple of it. Select a host buffer size - according to targetBufferingLatencyFrames and the ASIO driverInfo - buffer size constraints. - */ - result = SelectHostBufferSizeForUnspecifiedUserFramesPerBuffer( - targetBufferingLatencyFrames, driverInfo ); - } - - return result; -} - - -/* returns channelSelectors if present */ - -static PaError ValidateAsioSpecificStreamInfo( - const PaStreamParameters *streamParameters, - const PaAsioStreamInfo *streamInfo, - int deviceChannelCount, - int **channelSelectors ) -{ - if( streamInfo ) - { - if( streamInfo->size != sizeof( PaAsioStreamInfo ) - || streamInfo->version != 1 ) - { - return paIncompatibleHostApiSpecificStreamInfo; - } - - if( streamInfo->flags & paAsioUseChannelSelectors ) - *channelSelectors = streamInfo->channelSelectors; - - if( !(*channelSelectors) ) - return paIncompatibleHostApiSpecificStreamInfo; - - for( int i=0; i < streamParameters->channelCount; ++i ){ - if( (*channelSelectors)[i] < 0 - || (*channelSelectors)[i] >= deviceChannelCount ){ - return paInvalidChannelCount; - } - } - } - - return paNoError; -} - - -static bool IsUsingExternalClockSource() -{ - bool result = false; - ASIOError asioError; - ASIOClockSource clocks[32]; - long numSources=32; - - /* davidv: listing ASIO Clock sources. there is an ongoing investigation by - me about whether or not to call ASIOSetSampleRate if an external Clock is - used. 
A few drivers expected different things here */ - - asioError = ASIOGetClockSources(clocks, &numSources); - if( asioError != ASE_OK ){ - PA_DEBUG(("ERROR: ASIOGetClockSources: %s\n", PaAsio_GetAsioErrorText(asioError) )); - }else{ - PA_DEBUG(("INFO ASIOGetClockSources listing %d clocks\n", numSources )); - for (int i=0;iopenAsioDeviceIndex != paNoDevice ) - { - PA_DEBUG(("OpenStream paDeviceUnavailable\n")); - return paDeviceUnavailable; - } - - assert( theAsioStream == 0 ); - - if( inputParameters && outputParameters ) - { - /* full duplex ASIO stream must use the same device for input and output */ - - if( inputParameters->device != outputParameters->device ) - { - PA_DEBUG(("OpenStream paBadIODeviceCombination\n")); - return paBadIODeviceCombination; - } - } - - if( inputParameters ) - { - inputChannelCount = inputParameters->channelCount; - inputSampleFormat = inputParameters->sampleFormat; - suggestedInputLatencyFrames = (unsigned long)((inputParameters->suggestedLatency * sampleRate)+0.5f); - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - if( inputParameters->device == paUseHostApiSpecificDeviceSpecification ) - return paInvalidDevice; - - asioDeviceIndex = inputParameters->device; - - PaAsioDeviceInfo *asioDeviceInfo = (PaAsioDeviceInfo*)hostApi->deviceInfos[asioDeviceIndex]; - - /* validate hostApiSpecificStreamInfo */ - inputStreamInfo = (PaAsioStreamInfo*)inputParameters->hostApiSpecificStreamInfo; - result = ValidateAsioSpecificStreamInfo( inputParameters, inputStreamInfo, - asioDeviceInfo->commonDeviceInfo.maxInputChannels, - &inputChannelSelectors - ); - if( result != paNoError ) return result; - } - else - { - inputChannelCount = 0; - inputSampleFormat = 0; - suggestedInputLatencyFrames = 0; - } - - if( outputParameters ) - { - outputChannelCount = outputParameters->channelCount; - outputSampleFormat = outputParameters->sampleFormat; - suggestedOutputLatencyFrames = (unsigned long)((outputParameters->suggestedLatency * sampleRate)+0.5f); - - /* unless alternate device specification is supported, reject the use of - paUseHostApiSpecificDeviceSpecification */ - if( outputParameters->device == paUseHostApiSpecificDeviceSpecification ) - return paInvalidDevice; - - asioDeviceIndex = outputParameters->device; - - PaAsioDeviceInfo *asioDeviceInfo = (PaAsioDeviceInfo*)hostApi->deviceInfos[asioDeviceIndex]; - - /* validate hostApiSpecificStreamInfo */ - outputStreamInfo = (PaAsioStreamInfo*)outputParameters->hostApiSpecificStreamInfo; - result = ValidateAsioSpecificStreamInfo( outputParameters, outputStreamInfo, - asioDeviceInfo->commonDeviceInfo.maxOutputChannels, - &outputChannelSelectors - ); - if( result != paNoError ) return result; - } - else - { - outputChannelCount = 0; - outputSampleFormat = 0; - suggestedOutputLatencyFrames = 0; - } - - driverInfo = &asioHostApi->openAsioDriverInfo; - - /* NOTE: we load the driver and use its current settings - rather than the ones in our device info structure which may be stale */ - - result = LoadAsioDriver( asioHostApi, asioHostApi->inheritedHostApiRep.deviceInfos[ asioDeviceIndex ]->name, - driverInfo, asioHostApi->systemSpecific ); - if( result == paNoError ) - asioIsInitialized = 1; - else{ - PA_DEBUG(("OpenStream ERROR1 - LoadAsioDriver returned %d\n", result)); - goto error; - } - - /* check that input device can support inputChannelCount */ - if( inputChannelCount > 0 ) - { - if( inputChannelCount > driverInfo->inputChannelCount ) - { - result = 
paInvalidChannelCount; - PA_DEBUG(("OpenStream ERROR2\n")); - goto error; - } - } - - /* check that output device can support outputChannelCount */ - if( outputChannelCount ) - { - if( outputChannelCount > driverInfo->outputChannelCount ) - { - result = paInvalidChannelCount; - PA_DEBUG(("OpenStream ERROR3\n")); - goto error; - } - } - - result = ValidateAndSetSampleRate( sampleRate ); - if( result != paNoError ) - goto error; - - /* - IMPLEMENT ME: - - if a full duplex stream is requested, check that the combination - of input and output parameters is supported - */ - - /* validate platform specific flags */ - if( (streamFlags & paPlatformSpecificFlags) != 0 ){ - PA_DEBUG(("OpenStream invalid flags!!\n")); - return paInvalidFlag; /* unexpected platform specific flag */ - } - - - stream = (PaAsioStream*)PaUtil_AllocateMemory( sizeof(PaAsioStream) ); - if( !stream ) - { - result = paInsufficientMemory; - PA_DEBUG(("OpenStream ERROR5\n")); - goto error; - } - stream->blockingState = NULL; /* Blocking i/o not initialized, yet. */ - - - stream->completedBuffersPlayedEvent = CreateEvent( NULL, TRUE, FALSE, NULL ); - if( stream->completedBuffersPlayedEvent == NULL ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - PA_DEBUG(("OpenStream ERROR6\n")); - goto error; - } - completedBuffersPlayedEventInited = 1; - - - stream->asioBufferInfos = 0; /* for deallocation in error */ - stream->asioChannelInfos = 0; /* for deallocation in error */ - stream->bufferPtrs = 0; /* for deallocation in error */ - - /* Using blocking i/o interface... */ - if( usingBlockingIo ) - { - /* Blocking i/o is implemented by running callback mode, using a special blocking i/o callback. */ - streamCallback = BlockingIoPaCallback; /* Setup PA to use the ASIO blocking i/o callback. */ - userData = &theAsioStream; /* The callback user data will be the PA ASIO stream. */ - PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation, - &asioHostApi->blockingStreamInterface, streamCallback, userData ); - } - else /* Using callback interface... */ - { - PaUtil_InitializeStreamRepresentation( &stream->streamRepresentation, - &asioHostApi->callbackStreamInterface, streamCallback, userData ); - } - - - PaUtil_InitializeCpuLoadMeasurer( &stream->cpuLoadMeasurer, sampleRate ); - - - stream->asioBufferInfos = (ASIOBufferInfo*)PaUtil_AllocateMemory( - sizeof(ASIOBufferInfo) * (inputChannelCount + outputChannelCount) ); - if( !stream->asioBufferInfos ) - { - result = paInsufficientMemory; - PA_DEBUG(("OpenStream ERROR7\n")); - goto error; - } - - - for( i=0; i < inputChannelCount; ++i ) - { - ASIOBufferInfo *info = &stream->asioBufferInfos[i]; - - info->isInput = ASIOTrue; - - if( inputChannelSelectors ){ - // inputChannelSelectors values have already been validated in - // ValidateAsioSpecificStreamInfo() above - info->channelNum = inputChannelSelectors[i]; - }else{ - info->channelNum = i; - } - - info->buffers[0] = info->buffers[1] = 0; - } - - for( i=0; i < outputChannelCount; ++i ){ - ASIOBufferInfo *info = &stream->asioBufferInfos[inputChannelCount+i]; - - info->isInput = ASIOFalse; - - if( outputChannelSelectors ){ - // outputChannelSelectors values have already been validated in - // ValidateAsioSpecificStreamInfo() above - info->channelNum = outputChannelSelectors[i]; - }else{ - info->channelNum = i; - } - - info->buffers[0] = info->buffers[1] = 0; - } - - - /* Using blocking i/o interface... 
*/ - if( usingBlockingIo ) - { -/** @todo REVIEW selection of host buffer size for blocking i/o */ - - framesPerHostBuffer = SelectHostBufferSize( 0, framesPerBuffer, driverInfo ); - - } - else /* Using callback interface... */ - { - /* Select the host buffer size based on user framesPerBuffer and the - maximum of suggestedInputLatencyFrames and - suggestedOutputLatencyFrames. - - We should subtract any fixed known driver latency from - suggestedLatencyFrames before computing the host buffer size. - However, the ASIO API doesn't provide a method for determining fixed - latencies independent of the host buffer size. ASIOGetLatencies() - only returns latencies after the buffer size has been configured, so - we can't reliably use it to determine fixed latencies here. - - We could set the preferred buffer size and then subtract it from - the values returned from ASIOGetLatencies, but this would not be 100% - reliable, so we don't do it. - */ - - unsigned long targetBufferingLatencyFrames = - (( suggestedInputLatencyFrames > suggestedOutputLatencyFrames ) - ? suggestedInputLatencyFrames - : suggestedOutputLatencyFrames); - - framesPerHostBuffer = SelectHostBufferSize( targetBufferingLatencyFrames, - framesPerBuffer, driverInfo ); - } - - - PA_DEBUG(("PaAsioOpenStream: framesPerHostBuffer :%d\n", framesPerHostBuffer)); - - asioError = ASIOCreateBuffers( stream->asioBufferInfos, - inputChannelCount+outputChannelCount, - framesPerHostBuffer, &asioCallbacks_ ); - - if( asioError != ASE_OK - && framesPerHostBuffer != (unsigned long)driverInfo->bufferPreferredSize ) - { - PA_DEBUG(("ERROR: ASIOCreateBuffers: %s\n", PaAsio_GetAsioErrorText(asioError) )); - /* - Some buggy drivers (like the Hoontech DSP24) give incorrect - [min, preferred, max] values They should work with the preferred size - value, thus if Pa_ASIO_CreateBuffers fails with the hostBufferSize - computed in SelectHostBufferSize, we try again with the preferred size. 
- */ - - framesPerHostBuffer = driverInfo->bufferPreferredSize; - - PA_DEBUG(("PaAsioOpenStream: CORRECTED framesPerHostBuffer :%d\n", framesPerHostBuffer)); - - ASIOError asioError2 = ASIOCreateBuffers( stream->asioBufferInfos, - inputChannelCount+outputChannelCount, - framesPerHostBuffer, &asioCallbacks_ ); - if( asioError2 == ASE_OK ) - asioError = ASE_OK; - } - - if( asioError != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - PA_DEBUG(("OpenStream ERROR9\n")); - goto error; - } - - asioBuffersCreated = 1; - - stream->asioChannelInfos = (ASIOChannelInfo*)PaUtil_AllocateMemory( - sizeof(ASIOChannelInfo) * (inputChannelCount + outputChannelCount) ); - if( !stream->asioChannelInfos ) - { - result = paInsufficientMemory; - PA_DEBUG(("OpenStream ERROR10\n")); - goto error; - } - - for( i=0; i < inputChannelCount + outputChannelCount; ++i ) - { - stream->asioChannelInfos[i].channel = stream->asioBufferInfos[i].channelNum; - stream->asioChannelInfos[i].isInput = stream->asioBufferInfos[i].isInput; - asioError = ASIOGetChannelInfo( &stream->asioChannelInfos[i] ); - if( asioError != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - PA_DEBUG(("OpenStream ERROR11\n")); - goto error; - } - } - - stream->bufferPtrs = (void**)PaUtil_AllocateMemory( - 2 * sizeof(void*) * (inputChannelCount + outputChannelCount) ); - if( !stream->bufferPtrs ) - { - result = paInsufficientMemory; - PA_DEBUG(("OpenStream ERROR12\n")); - goto error; - } - - if( inputChannelCount > 0 ) - { - stream->inputBufferPtrs[0] = stream-> bufferPtrs; - stream->inputBufferPtrs[1] = &stream->bufferPtrs[inputChannelCount]; - - for( i=0; iinputBufferPtrs[0][i] = stream->asioBufferInfos[i].buffers[0]; - stream->inputBufferPtrs[1][i] = stream->asioBufferInfos[i].buffers[1]; - } - } - else - { - stream->inputBufferPtrs[0] = 0; - stream->inputBufferPtrs[1] = 0; - } - - if( outputChannelCount > 0 ) - { - stream->outputBufferPtrs[0] = &stream->bufferPtrs[inputChannelCount*2]; - stream->outputBufferPtrs[1] = &stream->bufferPtrs[inputChannelCount*2 + outputChannelCount]; - - for( i=0; ioutputBufferPtrs[0][i] = stream->asioBufferInfos[inputChannelCount+i].buffers[0]; - stream->outputBufferPtrs[1][i] = stream->asioBufferInfos[inputChannelCount+i].buffers[1]; - } - } - else - { - stream->outputBufferPtrs[0] = 0; - stream->outputBufferPtrs[1] = 0; - } - - if( inputChannelCount > 0 ) - { - /* FIXME: assume all channels use the same type for now - - see: "ASIO devices with multiple sample formats are unsupported" - http://www.portaudio.com/trac/ticket/106 - */ - ASIOSampleType inputType = stream->asioChannelInfos[0].type; - - PA_DEBUG(("ASIO Input type:%d",inputType)); - AsioSampleTypeLOG(inputType); - hostInputSampleFormat = AsioSampleTypeToPaNativeSampleFormat( inputType ); - - SelectAsioToPaConverter( inputType, &stream->inputBufferConverter, &stream->inputShift ); - } - else - { - hostInputSampleFormat = 0; - stream->inputBufferConverter = 0; - } - - if( outputChannelCount > 0 ) - { - /* FIXME: assume all channels use the same type for now - - see: "ASIO devices with multiple sample formats are unsupported" - http://www.portaudio.com/trac/ticket/106 - */ - ASIOSampleType outputType = stream->asioChannelInfos[inputChannelCount].type; - - PA_DEBUG(("ASIO Output type:%d",outputType)); - AsioSampleTypeLOG(outputType); - hostOutputSampleFormat = AsioSampleTypeToPaNativeSampleFormat( outputType ); - - SelectPaToAsioConverter( outputType, 
&stream->outputBufferConverter, &stream->outputShift ); - } - else - { - hostOutputSampleFormat = 0; - stream->outputBufferConverter = 0; - } - - /* Values returned by ASIOGetLatencies() include the latency introduced by - the ASIO double buffer. */ - ASIOGetLatencies( &stream->asioInputLatencyFrames, &stream->asioOutputLatencyFrames ); - - - /* Using blocking i/o interface... */ - if( usingBlockingIo ) - { - /* Allocate the blocking i/o input ring buffer memory. */ - stream->blockingState = (PaAsioStreamBlockingState*)PaUtil_AllocateMemory( sizeof(PaAsioStreamBlockingState) ); - if( !stream->blockingState ) - { - result = paInsufficientMemory; - PA_DEBUG(("ERROR! Blocking i/o interface struct allocation failed in OpenStream()\n")); - goto error; - } - - /* Initialize blocking i/o interface struct. */ - stream->blockingState->readFramesReadyEvent = NULL; /* Uninitialized, yet. */ - stream->blockingState->writeBuffersReadyEvent = NULL; /* Uninitialized, yet. */ - stream->blockingState->readRingBufferData = NULL; /* Uninitialized, yet. */ - stream->blockingState->writeRingBufferData = NULL; /* Uninitialized, yet. */ - stream->blockingState->readStreamBuffer = NULL; /* Uninitialized, yet. */ - stream->blockingState->writeStreamBuffer = NULL; /* Uninitialized, yet. */ - stream->blockingState->stopFlag = TRUE; /* Not started, yet. */ - - - /* If the user buffer is unspecified */ - if( framesPerBuffer == paFramesPerBufferUnspecified ) - { - /* Make the user buffer the same size as the host buffer. */ - framesPerBuffer = framesPerHostBuffer; - } - - - /* Initialize callback buffer processor. */ - result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor , - inputChannelCount , - inputSampleFormat & ~paNonInterleaved , /* Ring buffer. */ - (hostInputSampleFormat | paNonInterleaved), /* Host format. */ - outputChannelCount , - outputSampleFormat & ~paNonInterleaved, /* Ring buffer. */ - (hostOutputSampleFormat | paNonInterleaved), /* Host format. */ - sampleRate , - streamFlags , - framesPerBuffer , /* Frames per ring buffer block. */ - framesPerHostBuffer , /* Frames per asio buffer. */ - paUtilFixedHostBufferSize , - streamCallback , - userData ); - if( result != paNoError ){ - PA_DEBUG(("OpenStream ERROR13\n")); - goto error; - } - callbackBufferProcessorInited = TRUE; - - /* Initialize the blocking i/o buffer processor. */ - result = PaUtil_InitializeBufferProcessor(&stream->blockingState->bufferProcessor, - inputChannelCount , - inputSampleFormat , /* User format. */ - inputSampleFormat & ~paNonInterleaved , /* Ring buffer. */ - outputChannelCount , - outputSampleFormat , /* User format. */ - outputSampleFormat & ~paNonInterleaved, /* Ring buffer. */ - sampleRate , - paClipOff | paDitherOff , /* Don't use dither nor clipping. */ - framesPerBuffer , /* Frames per user buffer. */ - framesPerBuffer , /* Frames per ring buffer block. */ - paUtilBoundedHostBufferSize , - NULL, NULL );/* No callback! */ - if( result != paNoError ){ - PA_DEBUG(("ERROR! Blocking i/o buffer processor initialization failed in OpenStream()\n")); - goto error; - } - blockingBufferProcessorInited = TRUE; - - /* If input is requested. */ - if( inputChannelCount ) - { - /* Create the callback sync-event. */ - stream->blockingState->readFramesReadyEvent = CreateEvent( NULL, FALSE, FALSE, NULL ); - if( stream->blockingState->readFramesReadyEvent == NULL ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - PA_DEBUG(("ERROR! 
Blocking i/o \"read frames ready\" event creation failed in OpenStream()\n")); - goto error; - } - blockingReadFramesReadyEventInitialized = 1; - - - /* Create pointer buffer to access non-interleaved data in ReadStream() */ - stream->blockingState->readStreamBuffer = (void**)PaUtil_AllocateMemory( sizeof(void*) * inputChannelCount ); - if( !stream->blockingState->readStreamBuffer ) - { - result = paInsufficientMemory; - PA_DEBUG(("ERROR! Blocking i/o read stream buffer allocation failed in OpenStream()\n")); - goto error; - } - - /* The ring buffer should store as many data blocks as needed - to achieve the requested latency. Whereas it must be large - enough to store at least two complete data blocks. - - 1) Determine the amount of latency to be added to the - preferred ASIO latency. - 2) Make sure we have at lest one additional latency frame. - 3) Divide the number of frames by the desired block size to - get the number (rounded up to pure integer) of blocks to - be stored in the buffer. - 4) Add one additional block for block processing and convert - to samples frames. - 5) Get the next larger (or equal) power-of-two buffer size. - */ - lBlockingBufferSize = suggestedInputLatencyFrames - stream->asioInputLatencyFrames; - lBlockingBufferSize = (lBlockingBufferSize > 0) ? lBlockingBufferSize : 1; - lBlockingBufferSize = (lBlockingBufferSize + framesPerBuffer - 1) / framesPerBuffer; - lBlockingBufferSize = (lBlockingBufferSize + 1) * framesPerBuffer; - - /* Get the next larger or equal power-of-two buffersize. */ - lBlockingBufferSizePow2 = 1; - while( lBlockingBufferSize > (lBlockingBufferSizePow2<<=1) ); - lBlockingBufferSize = lBlockingBufferSizePow2; - - /* Compute total input latency in seconds */ - stream->streamRepresentation.streamInfo.inputLatency = - (double)( PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor ) - + PaUtil_GetBufferProcessorInputLatencyFrames(&stream->blockingState->bufferProcessor) - + (lBlockingBufferSize / framesPerBuffer - 1) * framesPerBuffer - + stream->asioInputLatencyFrames ) - / sampleRate; - - /* The code below prints the ASIO latency which doesn't include - the buffer processor latency nor the blocking i/o latency. It - reports the added latency separately. - */ - PA_DEBUG(("PaAsio : ASIO InputLatency = %ld (%ld ms),\n added buffProc:%ld (%ld ms),\n added blocking:%ld (%ld ms)\n", - stream->asioInputLatencyFrames, - (long)( stream->asioInputLatencyFrames * (1000.0 / sampleRate) ), - PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor), - (long)( PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor) * (1000.0 / sampleRate) ), - PaUtil_GetBufferProcessorInputLatencyFrames(&stream->blockingState->bufferProcessor) + (lBlockingBufferSize / framesPerBuffer - 1) * framesPerBuffer, - (long)( (PaUtil_GetBufferProcessorInputLatencyFrames(&stream->blockingState->bufferProcessor) + (lBlockingBufferSize / framesPerBuffer - 1) * framesPerBuffer) * (1000.0 / sampleRate) ) - )); - - /* Determine the size of ring buffer in bytes. */ - lBytesPerFrame = inputChannelCount * Pa_GetSampleSize(inputSampleFormat ); - - /* Allocate the blocking i/o input ring buffer memory. */ - stream->blockingState->readRingBufferData = (void*)PaUtil_AllocateMemory( lBlockingBufferSize * lBytesPerFrame ); - if( !stream->blockingState->readRingBufferData ) - { - result = paInsufficientMemory; - PA_DEBUG(("ERROR! 
Blocking i/o input ring buffer allocation failed in OpenStream()\n")); - goto error; - } - - /* Initialize the input ring buffer struct. */ - PaUtil_InitializeRingBuffer( &stream->blockingState->readRingBuffer , - lBytesPerFrame , - lBlockingBufferSize , - stream->blockingState->readRingBufferData ); - } - - /* If output is requested. */ - if( outputChannelCount ) - { - stream->blockingState->writeBuffersReadyEvent = CreateEvent( NULL, FALSE, FALSE, NULL ); - if( stream->blockingState->writeBuffersReadyEvent == NULL ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - PA_DEBUG(("ERROR! Blocking i/o \"write buffers ready\" event creation failed in OpenStream()\n")); - goto error; - } - blockingWriteBuffersReadyEventInitialized = 1; - - /* Create pointer buffer to access non-interleaved data in WriteStream() */ - stream->blockingState->writeStreamBuffer = (const void**)PaUtil_AllocateMemory( sizeof(const void*) * outputChannelCount ); - if( !stream->blockingState->writeStreamBuffer ) - { - result = paInsufficientMemory; - PA_DEBUG(("ERROR! Blocking i/o write stream buffer allocation failed in OpenStream()\n")); - goto error; - } - - /* The ring buffer should store as many data blocks as needed - to achieve the requested latency. Whereas it must be large - enough to store at least two complete data blocks. - - 1) Determine the amount of latency to be added to the - preferred ASIO latency. - 2) Make sure we have at lest one additional latency frame. - 3) Divide the number of frames by the desired block size to - get the number (rounded up to pure integer) of blocks to - be stored in the buffer. - 4) Add one additional block for block processing and convert - to samples frames. - 5) Get the next larger (or equal) power-of-two buffer size. - */ - lBlockingBufferSize = suggestedOutputLatencyFrames - stream->asioOutputLatencyFrames; - lBlockingBufferSize = (lBlockingBufferSize > 0) ? lBlockingBufferSize : 1; - lBlockingBufferSize = (lBlockingBufferSize + framesPerBuffer - 1) / framesPerBuffer; - lBlockingBufferSize = (lBlockingBufferSize + 1) * framesPerBuffer; - - /* The buffer size (without the additional block) corresponds - to the initial number of silent samples in the output ring - buffer. */ - stream->blockingState->writeRingBufferInitialFrames = lBlockingBufferSize - framesPerBuffer; - - /* Get the next larger or equal power-of-two buffersize. */ - lBlockingBufferSizePow2 = 1; - while( lBlockingBufferSize > (lBlockingBufferSizePow2<<=1) ); - lBlockingBufferSize = lBlockingBufferSizePow2; - - /* Compute total output latency in seconds */ - stream->streamRepresentation.streamInfo.outputLatency = - (double)( PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor) - + PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->blockingState->bufferProcessor) - + (lBlockingBufferSize / framesPerBuffer - 1) * framesPerBuffer - + stream->asioOutputLatencyFrames ) - / sampleRate; - - /* The code below prints the ASIO latency which doesn't include - the buffer processor latency nor the blocking i/o latency. It - reports the added latency separately. 
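[Editor's note: a minimal Python sketch of the five-step ring-buffer sizing rule spelled out in the numbered comments above. It is not part of the PortAudio sources; the suggested latency (2048 frames), ASIO latency (512 frames) and user buffer size (441 frames) are invented example values.]

```python
# Sketch of the blocking-i/o ring-buffer sizing rule (steps 1-5 above).
# All numbers are illustrative, not taken from any real device.
def blocking_ring_buffer_frames(suggested_latency_frames: int,
                                asio_latency_frames: int,
                                frames_per_buffer: int) -> int:
    # 1) latency to be added on top of the preferred ASIO latency
    size = suggested_latency_frames - asio_latency_frames
    # 2) keep at least one additional latency frame
    size = max(size, 1)
    # 3) round up to a whole number of user-sized blocks
    blocks = (size + frames_per_buffer - 1) // frames_per_buffer
    # 4) one extra block for block processing, converted back to frames
    size = (blocks + 1) * frames_per_buffer
    # 5) next larger-or-equal power-of-two buffer size
    pow2 = 1
    while True:
        pow2 *= 2
        if size <= pow2:
            break
    return pow2

print(blocking_ring_buffer_frames(2048, 512, 441))  # 2205 frames -> 4096
```

Because of the extra block and the power-of-two rounding, the ring buffer (and hence the "added blocking" latency reported by the debug output) can end up noticeably larger than the raw suggested latency.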
- */ - PA_DEBUG(("PaAsio : ASIO OutputLatency = %ld (%ld ms),\n added buffProc:%ld (%ld ms),\n added blocking:%ld (%ld ms)\n", - stream->asioOutputLatencyFrames, - (long)( stream->asioOutputLatencyFrames * (1000.0 / sampleRate) ), - PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor), - (long)( PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor) * (1000.0 / sampleRate) ), - PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->blockingState->bufferProcessor) + (lBlockingBufferSize / framesPerBuffer - 1) * framesPerBuffer, - (long)( (PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->blockingState->bufferProcessor) + (lBlockingBufferSize / framesPerBuffer - 1) * framesPerBuffer) * (1000.0 / sampleRate) ) - )); - - /* Determine the size of ring buffer in bytes. */ - lBytesPerFrame = outputChannelCount * Pa_GetSampleSize(outputSampleFormat); - - /* Allocate the blocking i/o output ring buffer memory. */ - stream->blockingState->writeRingBufferData = (void*)PaUtil_AllocateMemory( lBlockingBufferSize * lBytesPerFrame ); - if( !stream->blockingState->writeRingBufferData ) - { - result = paInsufficientMemory; - PA_DEBUG(("ERROR! Blocking i/o output ring buffer allocation failed in OpenStream()\n")); - goto error; - } - - /* Initialize the output ring buffer struct. */ - PaUtil_InitializeRingBuffer( &stream->blockingState->writeRingBuffer , - lBytesPerFrame , - lBlockingBufferSize , - stream->blockingState->writeRingBufferData ); - } - - stream->streamRepresentation.streamInfo.sampleRate = sampleRate; - - - } - else /* Using callback interface... */ - { - result = PaUtil_InitializeBufferProcessor( &stream->bufferProcessor, - inputChannelCount, inputSampleFormat, (hostInputSampleFormat | paNonInterleaved), - outputChannelCount, outputSampleFormat, (hostOutputSampleFormat | paNonInterleaved), - sampleRate, streamFlags, framesPerBuffer, - framesPerHostBuffer, paUtilFixedHostBufferSize, - streamCallback, userData ); - if( result != paNoError ){ - PA_DEBUG(("OpenStream ERROR13\n")); - goto error; - } - callbackBufferProcessorInited = TRUE; - - stream->streamRepresentation.streamInfo.inputLatency = - (double)( PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor) - + stream->asioInputLatencyFrames) / sampleRate; // seconds - stream->streamRepresentation.streamInfo.outputLatency = - (double)( PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor) - + stream->asioOutputLatencyFrames) / sampleRate; // seconds - stream->streamRepresentation.streamInfo.sampleRate = sampleRate; - - // the code below prints the ASIO latency which doesn't include the - // buffer processor latency. 
it reports the added latency separately - PA_DEBUG(("PaAsio : ASIO InputLatency = %ld (%ld ms), added buffProc:%ld (%ld ms)\n", - stream->asioInputLatencyFrames, - (long)((stream->asioInputLatencyFrames*1000)/ sampleRate), - PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor), - (long)((PaUtil_GetBufferProcessorInputLatencyFrames(&stream->bufferProcessor)*1000)/ sampleRate) - )); - - PA_DEBUG(("PaAsio : ASIO OuputLatency = %ld (%ld ms), added buffProc:%ld (%ld ms)\n", - stream->asioOutputLatencyFrames, - (long)((stream->asioOutputLatencyFrames*1000)/ sampleRate), - PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor), - (long)((PaUtil_GetBufferProcessorOutputLatencyFrames(&stream->bufferProcessor)*1000)/ sampleRate) - )); - } - - stream->asioHostApi = asioHostApi; - stream->framesPerHostCallback = framesPerHostBuffer; - - stream->inputChannelCount = inputChannelCount; - stream->outputChannelCount = outputChannelCount; - stream->postOutput = driverInfo->postOutput; - stream->isStopped = 1; - stream->isActive = 0; - - asioHostApi->openAsioDeviceIndex = asioDeviceIndex; - - theAsioStream = stream; - *s = (PaStream*)stream; - - return result; - -error: - PA_DEBUG(("goto errored\n")); - if( stream ) - { - if( stream->blockingState ) - { - if( blockingBufferProcessorInited ) - PaUtil_TerminateBufferProcessor( &stream->blockingState->bufferProcessor ); - - if( stream->blockingState->writeRingBufferData ) - PaUtil_FreeMemory( stream->blockingState->writeRingBufferData ); - if( stream->blockingState->writeStreamBuffer ) - PaUtil_FreeMemory( stream->blockingState->writeStreamBuffer ); - if( blockingWriteBuffersReadyEventInitialized ) - CloseHandle( stream->blockingState->writeBuffersReadyEvent ); - - if( stream->blockingState->readRingBufferData ) - PaUtil_FreeMemory( stream->blockingState->readRingBufferData ); - if( stream->blockingState->readStreamBuffer ) - PaUtil_FreeMemory( stream->blockingState->readStreamBuffer ); - if( blockingReadFramesReadyEventInitialized ) - CloseHandle( stream->blockingState->readFramesReadyEvent ); - - PaUtil_FreeMemory( stream->blockingState ); - } - - if( callbackBufferProcessorInited ) - PaUtil_TerminateBufferProcessor( &stream->bufferProcessor ); - - if( completedBuffersPlayedEventInited ) - CloseHandle( stream->completedBuffersPlayedEvent ); - - if( stream->asioBufferInfos ) - PaUtil_FreeMemory( stream->asioBufferInfos ); - - if( stream->asioChannelInfos ) - PaUtil_FreeMemory( stream->asioChannelInfos ); - - if( stream->bufferPtrs ) - PaUtil_FreeMemory( stream->bufferPtrs ); - - PaUtil_FreeMemory( stream ); - } - - if( asioBuffersCreated ) - ASIODisposeBuffers(); - - if( asioIsInitialized ) - { - UnloadAsioDriver(); - } - return result; -} - - -/* - When CloseStream() is called, the multi-api layer ensures that - the stream has already been stopped or aborted. -*/ -static PaError CloseStream( PaStream* s ) -{ - PaError result = paNoError; - PaAsioStream *stream = (PaAsioStream*)s; - - /* - IMPLEMENT ME: - - additional stream closing + cleanup - */ - - PaUtil_TerminateBufferProcessor( &stream->bufferProcessor ); - PaUtil_TerminateStreamRepresentation( &stream->streamRepresentation ); - - stream->asioHostApi->openAsioDeviceIndex = paNoDevice; - - CloseHandle( stream->completedBuffersPlayedEvent ); - - /* Using blocking i/o interface... 
*/ - if( stream->blockingState ) - { - PaUtil_TerminateBufferProcessor( &stream->blockingState->bufferProcessor ); - - if( stream->inputChannelCount ) { - PaUtil_FreeMemory( stream->blockingState->readRingBufferData ); - PaUtil_FreeMemory( stream->blockingState->readStreamBuffer ); - CloseHandle( stream->blockingState->readFramesReadyEvent ); - } - if( stream->outputChannelCount ) { - PaUtil_FreeMemory( stream->blockingState->writeRingBufferData ); - PaUtil_FreeMemory( stream->blockingState->writeStreamBuffer ); - CloseHandle( stream->blockingState->writeBuffersReadyEvent ); - } - - PaUtil_FreeMemory( stream->blockingState ); - } - - PaUtil_FreeMemory( stream->asioBufferInfos ); - PaUtil_FreeMemory( stream->asioChannelInfos ); - PaUtil_FreeMemory( stream->bufferPtrs ); - PaUtil_FreeMemory( stream ); - - ASIODisposeBuffers(); - UnloadAsioDriver(); - - theAsioStream = 0; - - return result; -} - - -static void bufferSwitch(long index, ASIOBool directProcess) -{ -//TAKEN FROM THE ASIO SDK - - // the actual processing callback. - // Beware that this is normally in a separate thread, hence be sure that - // you take care about thread synchronization. This is omitted here for - // simplicity. - - // as this is a "back door" into the bufferSwitchTimeInfo a timeInfo needs - // to be created though it will only set the timeInfo.samplePosition and - // timeInfo.systemTime fields and the according flags - - ASIOTime timeInfo; - memset( &timeInfo, 0, sizeof (timeInfo) ); - - // get the time stamp of the buffer, not necessary if no - // synchronization to other media is required - if( ASIOGetSamplePosition(&timeInfo.timeInfo.samplePosition, &timeInfo.timeInfo.systemTime) == ASE_OK) - timeInfo.timeInfo.flags = kSystemTimeValid | kSamplePositionValid; - - // Call the real callback - bufferSwitchTimeInfo( &timeInfo, index, directProcess ); -} - - -// conversion from 64 bit ASIOSample/ASIOTimeStamp to double float -#if NATIVE_INT64 - #define ASIO64toDouble(a) (a) -#else - const double twoRaisedTo32 = 4294967296.; - #define ASIO64toDouble(a) ((a).lo + (a).hi * twoRaisedTo32) -#endif - -static ASIOTime *bufferSwitchTimeInfo( ASIOTime *timeInfo, long index, ASIOBool directProcess ) -{ - // the actual processing callback. - // Beware that this is normally in a separate thread, hence be sure that - // you take care about thread synchronization. - - - /* The SDK says the following about the directProcess flag: - suggests to the host whether it should immediately start processing - (directProcess == ASIOTrue), or whether its process should be deferred - because the call comes from a very low level (for instance, a high level - priority interrupt), and direct processing would cause timing instabilities for - the rest of the system. If in doubt, directProcess should be set to ASIOFalse. - - We just ignore directProcess. This could cause incompatibilities with - drivers which really don't want the audio processing to occur in this - callback, but none have been identified yet. 
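[Editor's note: the ASIO64toDouble macro above only has work to do when the compiler lacks a native 64-bit integer type and the driver delivers sample positions / timestamps as separate 32-bit "hi" and "lo" words. A rough Python equivalent of that non-NATIVE_INT64 branch, with invented example values:]

```python
# Rough equivalent of ASIO64toDouble for the split hi/lo case.
TWO_RAISED_TO_32 = 4294967296.0  # 2 ** 32

def asio64_to_double(hi: int, lo: int) -> float:
    """Recombine a 64-bit ASIO sample position / timestamp into a double."""
    return lo + hi * TWO_RAISED_TO_32

# Example: a system time of 5 seconds expressed in nanoseconds.
ns = 5_000_000_000
hi, lo = divmod(ns, 2 ** 32)
print(asio64_to_double(hi, lo))  # 5000000000.0
```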
- */ - - (void) directProcess; /* suppress unused parameter warning */ - -#if 0 - // store the timeInfo for later use - asioDriverInfo.tInfo = *timeInfo; - - // get the time stamp of the buffer, not necessary if no - // synchronization to other media is required - - if (timeInfo->timeInfo.flags & kSystemTimeValid) - asioDriverInfo.nanoSeconds = ASIO64toDouble(timeInfo->timeInfo.systemTime); - else - asioDriverInfo.nanoSeconds = 0; - - if (timeInfo->timeInfo.flags & kSamplePositionValid) - asioDriverInfo.samples = ASIO64toDouble(timeInfo->timeInfo.samplePosition); - else - asioDriverInfo.samples = 0; - - if (timeInfo->timeCode.flags & kTcValid) - asioDriverInfo.tcSamples = ASIO64toDouble(timeInfo->timeCode.timeCodeSamples); - else - asioDriverInfo.tcSamples = 0; - - // get the system reference time - asioDriverInfo.sysRefTime = get_sys_reference_time(); -#endif - -#if 0 - // a few debug messages for the Windows device driver developer - // tells you the time when driver got its interrupt and the delay until the app receives - // the event notification. - static double last_samples = 0; - char tmp[128]; - sprintf (tmp, "diff: %d / %d ms / %d ms / %d samples \n", asioDriverInfo.sysRefTime - (long)(asioDriverInfo.nanoSeconds / 1000000.0), asioDriverInfo.sysRefTime, (long)(asioDriverInfo.nanoSeconds / 1000000.0), (long)(asioDriverInfo.samples - last_samples)); - OutputDebugString (tmp); - last_samples = asioDriverInfo.samples; -#endif - - - if( !theAsioStream ) - return 0L; - - // protect against reentrancy - if( PaAsio_AtomicIncrement(&theAsioStream->reenterCount) ) - { - theAsioStream->reenterError++; - //DBUG(("bufferSwitchTimeInfo : reentrancy detection = %d\n", asioDriverInfo.reenterError)); - return 0L; - } - - int buffersDone = 0; - - do - { - if( buffersDone > 0 ) - { - // this is a reentered buffer, we missed processing it on time - // set the input overflow and output underflow flags as appropriate - - if( theAsioStream->inputChannelCount > 0 ) - theAsioStream->callbackFlags |= paInputOverflow; - - if( theAsioStream->outputChannelCount > 0 ) - theAsioStream->callbackFlags |= paOutputUnderflow; - } - else - { - if( theAsioStream->zeroOutput ) - { - ZeroOutputBuffers( theAsioStream, index ); - - // Finally if the driver supports the ASIOOutputReady() optimization, - // do it here, all data are in place - if( theAsioStream->postOutput ) - ASIOOutputReady(); - - if( theAsioStream->stopProcessing ) - { - if( theAsioStream->stopPlayoutCount < 2 ) - { - ++theAsioStream->stopPlayoutCount; - if( theAsioStream->stopPlayoutCount == 2 ) - { - theAsioStream->isActive = 0; - if( theAsioStream->streamRepresentation.streamFinishedCallback != 0 ) - theAsioStream->streamRepresentation.streamFinishedCallback( theAsioStream->streamRepresentation.userData ); - theAsioStream->streamFinishedCallbackCalled = true; - SetEvent( theAsioStream->completedBuffersPlayedEvent ); - } - } - } - } - else - { - -#if 0 -/* - see: "ASIO callback underflow/overflow buffer slip detection doesn't work" - http://www.portaudio.com/trac/ticket/110 -*/ - -// test code to try to detect slip conditions... these may work on some systems -// but neither of them work on the RME Digi96 - -// check that sample delta matches buffer size (otherwise we must have skipped -// a buffer. 
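[Editor's note: the reenterCount logic above guards against the driver calling bufferSwitchTimeInfo() again while a previous invocation is still running. Below is a single-threaded Python sketch of the same counting scheme; the real code uses interlocked atomics, and the class and method names here are invented for illustration.]

```python
# Single-threaded sketch of the reentrancy counter used above.
# reenter_count == -1 means "nobody is inside the callback".
class BufferSwitchGuard:
    def __init__(self, process_buffer):
        self.reenter_count = -1
        self.process_buffer = process_buffer  # callable(missed_deadline: bool)

    def buffer_switch(self):
        # Stands in for PaAsio_AtomicIncrement(): only the caller that moves
        # the counter from -1 to 0 performs the processing.
        self.reenter_count += 1
        if self.reenter_count > 0:
            return  # re-entered: the active invocation will pick this buffer up

        buffers_done = 0
        while True:
            # Buffers after the first one were missed on time, so the real
            # code sets paInputOverflow / paOutputUnderflow for them.
            self.process_buffer(buffers_done > 0)
            buffers_done += 1
            # Stands in for PaAsio_AtomicDecrement(): loop again if a
            # re-entry happened while we were processing.
            self.reenter_count -= 1
            if self.reenter_count < 0:
                break

guard = BufferSwitchGuard(lambda missed: print("processed, missed =", missed))
guard.buffer_switch()  # processed, missed = False
```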
-static double last_samples = -512; -double samples; -//if( timeInfo->timeCode.flags & kTcValid ) -// samples = ASIO64toDouble(timeInfo->timeCode.timeCodeSamples); -//else - samples = ASIO64toDouble(timeInfo->timeInfo.samplePosition); -int delta = samples - last_samples; -//printf( "%d\n", delta); -last_samples = samples; - -if( delta > theAsioStream->framesPerHostCallback ) -{ - if( theAsioStream->inputChannelCount > 0 ) - theAsioStream->callbackFlags |= paInputOverflow; - - if( theAsioStream->outputChannelCount > 0 ) - theAsioStream->callbackFlags |= paOutputUnderflow; -} - -// check that the buffer index is not the previous index (which would indicate -// that a buffer was skipped. -static int previousIndex = 1; -if( index == previousIndex ) -{ - if( theAsioStream->inputChannelCount > 0 ) - theAsioStream->callbackFlags |= paInputOverflow; - - if( theAsioStream->outputChannelCount > 0 ) - theAsioStream->callbackFlags |= paOutputUnderflow; -} -previousIndex = index; -#endif - - int i; - - PaUtil_BeginCpuLoadMeasurement( &theAsioStream->cpuLoadMeasurer ); - - PaStreamCallbackTimeInfo paTimeInfo; - - // asio systemTime is supposed to be measured according to the same - // clock as timeGetTime - paTimeInfo.currentTime = (ASIO64toDouble( timeInfo->timeInfo.systemTime ) * .000000001); - - /* patch from Paul Boege */ - paTimeInfo.inputBufferAdcTime = paTimeInfo.currentTime - - ((double)theAsioStream->asioInputLatencyFrames/theAsioStream->streamRepresentation.streamInfo.sampleRate); - - paTimeInfo.outputBufferDacTime = paTimeInfo.currentTime + - ((double)theAsioStream->asioOutputLatencyFrames/theAsioStream->streamRepresentation.streamInfo.sampleRate); - - /* old version is buggy because the buffer processor also adds in its latency to the time parameters - paTimeInfo.inputBufferAdcTime = paTimeInfo.currentTime - theAsioStream->streamRepresentation.streamInfo.inputLatency; - paTimeInfo.outputBufferDacTime = paTimeInfo.currentTime + theAsioStream->streamRepresentation.streamInfo.outputLatency; - */ - -/* Disabled! Stopping and re-starting the stream causes an input overflow / output underflow. S.Fischer */ -#if 0 -// detect underflows by checking inter-callback time > 2 buffer period -static double previousTime = -1; -if( previousTime > 0 ){ - - double delta = paTimeInfo.currentTime - previousTime; - - if( delta >= 2. * (theAsioStream->framesPerHostCallback / theAsioStream->streamRepresentation.streamInfo.sampleRate) ){ - if( theAsioStream->inputChannelCount > 0 ) - theAsioStream->callbackFlags |= paInputOverflow; - - if( theAsioStream->outputChannelCount > 0 ) - theAsioStream->callbackFlags |= paOutputUnderflow; - } -} -previousTime = paTimeInfo.currentTime; -#endif - - // note that the above input and output times do not need to be - // adjusted for the latency of the buffer processor -- the buffer - // processor handles that. 
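[Editor's note: for concreteness, the paTimeInfo fields above reduce to the following arithmetic. The values are invented (48 kHz, 256/512 frames of ASIO input/output latency); systemTime is the driver's clock expressed in nanoseconds.]

```python
# How paTimeInfo is derived above, with illustrative values.
sample_rate = 48000.0
asio_input_latency_frames = 256
asio_output_latency_frames = 512
system_time_ns = 1_234_567_890_000          # ASIO systemTime (nanoseconds)

current_time = system_time_ns * 1e-9        # seconds, same clock as timeGetTime()
input_buffer_adc_time = current_time - asio_input_latency_frames / sample_rate
output_buffer_dac_time = current_time + asio_output_latency_frames / sample_rate

print(round(current_time, 6))               # 1234.56789
print(round(input_buffer_adc_time, 6))      # 1234.562557
print(round(output_buffer_dac_time, 6))     # 1234.578557
```

As the comment above notes, only the raw ASIO latencies appear here; the buffer processor accounts for its own latency separately.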
- - if( theAsioStream->inputBufferConverter ) - { - for( i=0; iinputChannelCount; i++ ) - { - theAsioStream->inputBufferConverter( theAsioStream->inputBufferPtrs[index][i], - theAsioStream->inputShift, theAsioStream->framesPerHostCallback ); - } - } - - PaUtil_BeginBufferProcessing( &theAsioStream->bufferProcessor, &paTimeInfo, theAsioStream->callbackFlags ); - - /* reset status flags once they've been passed to the callback */ - theAsioStream->callbackFlags = 0; - - PaUtil_SetInputFrameCount( &theAsioStream->bufferProcessor, 0 /* default to host buffer size */ ); - for( i=0; iinputChannelCount; ++i ) - PaUtil_SetNonInterleavedInputChannel( &theAsioStream->bufferProcessor, i, theAsioStream->inputBufferPtrs[index][i] ); - - PaUtil_SetOutputFrameCount( &theAsioStream->bufferProcessor, 0 /* default to host buffer size */ ); - for( i=0; ioutputChannelCount; ++i ) - PaUtil_SetNonInterleavedOutputChannel( &theAsioStream->bufferProcessor, i, theAsioStream->outputBufferPtrs[index][i] ); - - int callbackResult; - if( theAsioStream->stopProcessing ) - callbackResult = paComplete; - else - callbackResult = paContinue; - unsigned long framesProcessed = PaUtil_EndBufferProcessing( &theAsioStream->bufferProcessor, &callbackResult ); - - if( theAsioStream->outputBufferConverter ) - { - for( i=0; ioutputChannelCount; i++ ) - { - theAsioStream->outputBufferConverter( theAsioStream->outputBufferPtrs[index][i], - theAsioStream->outputShift, theAsioStream->framesPerHostCallback ); - } - } - - PaUtil_EndCpuLoadMeasurement( &theAsioStream->cpuLoadMeasurer, framesProcessed ); - - // Finally if the driver supports the ASIOOutputReady() optimization, - // do it here, all data are in place - if( theAsioStream->postOutput ) - ASIOOutputReady(); - - if( callbackResult == paContinue ) - { - /* nothing special to do */ - } - else if( callbackResult == paAbort ) - { - /* finish playback immediately */ - theAsioStream->isActive = 0; - if( theAsioStream->streamRepresentation.streamFinishedCallback != 0 ) - theAsioStream->streamRepresentation.streamFinishedCallback( theAsioStream->streamRepresentation.userData ); - theAsioStream->streamFinishedCallbackCalled = true; - SetEvent( theAsioStream->completedBuffersPlayedEvent ); - theAsioStream->zeroOutput = true; - } - else /* paComplete or other non-zero value indicating complete */ - { - /* Finish playback once currently queued audio has completed. */ - theAsioStream->stopProcessing = true; - - if( PaUtil_IsBufferProcessorOutputEmpty( &theAsioStream->bufferProcessor ) ) - { - theAsioStream->zeroOutput = true; - theAsioStream->stopPlayoutCount = 0; - } - } - } - } - - ++buffersDone; - }while( PaAsio_AtomicDecrement(&theAsioStream->reenterCount) >= 0 ); - - return 0L; -} - - -static void sampleRateChanged(ASIOSampleRate sRate) -{ - // TAKEN FROM THE ASIO SDK - // do whatever you need to do if the sample rate changed - // usually this only happens during external sync. - // Audio processing is not stopped by the driver, actual sample rate - // might not have even changed, maybe only the sample rate status of an - // AES/EBU or S/PDIF digital input at the audio device. - // You might have to update time/sample related conversion routines, etc. - - (void) sRate; /* unused parameter */ - PA_DEBUG( ("sampleRateChanged : %d \n", sRate)); -} - -static long asioMessages(long selector, long value, void* message, double* opt) -{ -// TAKEN FROM THE ASIO SDK - // currently the parameters "value", "message" and "opt" are not used. 
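[Editor's note: the shutdown handling above (paAbort vs. paComplete, stopProcessing, zeroOutput, stopPlayoutCount) amounts to a small state machine. Below is a hedged Python sketch of that sequencing with invented names, assuming the host keeps invoking the callback once per buffer; it is an illustration, not the PortAudio implementation.]

```python
# Sketch of the playout-completion sequencing used above (names invented).
# "abort": stop immediately. "complete": set stop_processing, wait for the
# buffer processor to drain, then play two more zeroed buffers (the ASIO
# double buffer) before declaring the stream inactive.
class PlayoutState:
    def __init__(self):
        self.stop_processing = False
        self.zero_output = False
        self.stop_playout_count = 0
        self.is_active = True

    def on_callback_result(self, result: str, output_empty: bool):
        if result == "abort":
            self.is_active = False          # finish immediately
            self.zero_output = True
        elif result == "complete" or self.stop_processing:
            self.stop_processing = True
            if output_empty:                # queued audio has been played out
                self.zero_output = True
                self.stop_playout_count = 0

    def on_zeroed_buffer(self):
        # Called for each host buffer emitted while zero_output is set.
        if self.stop_processing and self.stop_playout_count < 2:
            self.stop_playout_count += 1
            if self.stop_playout_count == 2:
                self.is_active = False      # completedBuffersPlayedEvent fires here
```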
- long ret = 0; - - (void) message; /* unused parameters */ - (void) opt; - - PA_DEBUG( ("asioMessages : %d , %d \n", selector, value)); - - switch(selector) - { - case kAsioSelectorSupported: - if(value == kAsioResetRequest - || value == kAsioEngineVersion - || value == kAsioResyncRequest - || value == kAsioLatenciesChanged - // the following three were added for ASIO 2.0, you don't necessarily have to support them - || value == kAsioSupportsTimeInfo - || value == kAsioSupportsTimeCode - || value == kAsioSupportsInputMonitor) - ret = 1L; - break; - - case kAsioBufferSizeChange: - //printf("kAsioBufferSizeChange \n"); - break; - - case kAsioResetRequest: - // defer the task and perform the reset of the driver during the next "safe" situation - // You cannot reset the driver right now, as this code is called from the driver. - // Reset the driver is done by completely destruct is. I.e. ASIOStop(), ASIODisposeBuffers(), Destruction - // Afterwards you initialize the driver again. - - /*FIXME: commented the next line out - - see: "PA/ASIO ignores some driver notifications it probably shouldn't" - http://www.portaudio.com/trac/ticket/108 - */ - //asioDriverInfo.stopped; // In this sample the processing will just stop - ret = 1L; - break; - - case kAsioResyncRequest: - // This informs the application, that the driver encountered some non fatal data loss. - // It is used for synchronization purposes of different media. - // Added mainly to work around the Win16Mutex problems in Windows 95/98 with the - // Windows Multimedia system, which could loose data because the Mutex was hold too long - // by another thread. - // However a driver can issue it in other situations, too. - ret = 1L; - break; - - case kAsioLatenciesChanged: - // This will inform the host application that the drivers were latencies changed. - // Beware, it this does not mean that the buffer sizes have changed! - // You might need to update internal delay data. - ret = 1L; - //printf("kAsioLatenciesChanged \n"); - break; - - case kAsioEngineVersion: - // return the supported ASIO version of the host application - // If a host applications does not implement this selector, ASIO 1.0 is assumed - // by the driver - ret = 2L; - break; - - case kAsioSupportsTimeInfo: - // informs the driver whether the asioCallbacks.bufferSwitchTimeInfo() callback - // is supported. - // For compatibility with ASIO 1.0 drivers the host application should always support - // the "old" bufferSwitch method, too. - ret = 1; - break; - - case kAsioSupportsTimeCode: - // informs the driver whether application is interested in time code info. - // If an application does not need to know about time code, the driver has less work - // to do. - ret = 0; - break; - } - return ret; -} - - -static PaError StartStream( PaStream *s ) -{ - PaError result = paNoError; - PaAsioStream *stream = (PaAsioStream*)s; - PaAsioStreamBlockingState *blockingState = stream->blockingState; - ASIOError asioError; - - if( stream->outputChannelCount > 0 ) - { - ZeroOutputBuffers( stream, 0 ); - ZeroOutputBuffers( stream, 1 ); - } - - PaUtil_ResetBufferProcessor( &stream->bufferProcessor ); - stream->stopProcessing = false; - stream->zeroOutput = false; - - /* Reentrancy counter initialisation */ - stream->reenterCount = -1; - stream->reenterError = 0; - - stream->callbackFlags = 0; - - if( ResetEvent( stream->completedBuffersPlayedEvent ) == 0 ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - } - - - /* Using blocking i/o interface... 
*/ - if( blockingState ) - { - /* Reset blocking i/o buffer processor. */ - PaUtil_ResetBufferProcessor( &blockingState->bufferProcessor ); - - /* If we're about to process some input data. */ - if( stream->inputChannelCount ) - { - /* Reset callback-ReadStream sync event. */ - if( ResetEvent( blockingState->readFramesReadyEvent ) == 0 ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - } - - /* Flush blocking i/o ring buffer. */ - PaUtil_FlushRingBuffer( &blockingState->readRingBuffer ); - (*blockingState->bufferProcessor.inputZeroer)( blockingState->readRingBuffer.buffer, 1, blockingState->bufferProcessor.inputChannelCount * blockingState->readRingBuffer.bufferSize ); - } - - /* If we're about to process some output data. */ - if( stream->outputChannelCount ) - { - /* Reset callback-WriteStream sync event. */ - if( ResetEvent( blockingState->writeBuffersReadyEvent ) == 0 ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - } - - /* Flush blocking i/o ring buffer. */ - PaUtil_FlushRingBuffer( &blockingState->writeRingBuffer ); - (*blockingState->bufferProcessor.outputZeroer)( blockingState->writeRingBuffer.buffer, 1, blockingState->bufferProcessor.outputChannelCount * blockingState->writeRingBuffer.bufferSize ); - - /* Initialize the output ring buffer to "silence". */ - PaUtil_AdvanceRingBufferWriteIndex( &blockingState->writeRingBuffer, blockingState->writeRingBufferInitialFrames ); - } - - /* Clear requested frames / buffers count. */ - blockingState->writeBuffersRequested = 0; - blockingState->readFramesRequested = 0; - blockingState->writeBuffersRequestedFlag = FALSE; - blockingState->readFramesRequestedFlag = FALSE; - blockingState->outputUnderflowFlag = FALSE; - blockingState->inputOverflowFlag = FALSE; - blockingState->stopFlag = FALSE; - } - - - if( result == paNoError ) - { - assert( theAsioStream == stream ); /* theAsioStream should be set correctly in OpenStream */ - - /* initialize these variables before the callback has a chance to be invoked */ - stream->isStopped = 0; - stream->isActive = 1; - stream->streamFinishedCallbackCalled = false; - - asioError = ASIOStart(); - if( asioError != ASE_OK ) - { - stream->isStopped = 1; - stream->isActive = 0; - - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - } - } - - return result; -} - -static void EnsureCallbackHasCompleted( PaAsioStream *stream ) -{ - // make sure that the callback is not still in-flight after ASIOStop() - // returns. This has been observed to happen on the Hoontech DSP24 for - // example. - int count = 2000; // only wait for 2 seconds, rather than hanging. - while( stream->reenterCount != -1 && count > 0 ) - { - Sleep(1); - --count; - } -} - -static PaError StopStream( PaStream *s ) -{ - PaError result = paNoError; - PaAsioStream *stream = (PaAsioStream*)s; - PaAsioStreamBlockingState *blockingState = stream->blockingState; - ASIOError asioError; - - if( stream->isActive ) - { - /* If blocking i/o output is in use */ - if( blockingState && stream->outputChannelCount ) - { - /* Request the whole output buffer to be available. */ - blockingState->writeBuffersRequested = blockingState->writeRingBuffer.bufferSize; - /* Signalize that additional buffers are need. */ - blockingState->writeBuffersRequestedFlag = TRUE; - /* Set flag to indicate the playback is to be stopped. */ - blockingState->stopFlag = TRUE; - - /* Wait until requested number of buffers has been freed. 
Time - out after twice the blocking i/o output buffer could have - been consumed. */ - DWORD timeout = (DWORD)( 2 * blockingState->writeRingBuffer.bufferSize * 1000 - / stream->streamRepresentation.streamInfo.sampleRate ); - DWORD waitResult = WaitForSingleObject( blockingState->writeBuffersReadyEvent, timeout ); - - /* If something seriously went wrong... */ - if( waitResult == WAIT_FAILED ) - { - PA_DEBUG(("WaitForSingleObject() failed in StopStream()\n")); - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - } - else if( waitResult == WAIT_TIMEOUT ) - { - PA_DEBUG(("WaitForSingleObject() timed out in StopStream()\n")); - result = paTimedOut; - } - } - - stream->stopProcessing = true; - - /* wait for the stream to finish playing out enqueued buffers. - timeout after four times the stream latency. - - @todo should use a better time out value - if the user buffer - length is longer than the asio buffer size then that should - be taken into account. - */ - if( WaitForSingleObject( stream->completedBuffersPlayedEvent, - (DWORD)(stream->streamRepresentation.streamInfo.outputLatency * 1000. * 4.) ) - == WAIT_TIMEOUT ) - { - PA_DEBUG(("WaitForSingleObject() timed out in StopStream()\n" )); - } - } - - asioError = ASIOStop(); - if( asioError == ASE_OK ) - { - EnsureCallbackHasCompleted( stream ); - } - else - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - } - - stream->isStopped = 1; - stream->isActive = 0; - - if( !stream->streamFinishedCallbackCalled ) - { - if( stream->streamRepresentation.streamFinishedCallback != 0 ) - stream->streamRepresentation.streamFinishedCallback( stream->streamRepresentation.userData ); - } - - return result; -} - -static PaError AbortStream( PaStream *s ) -{ - PaError result = paNoError; - PaAsioStream *stream = (PaAsioStream*)s; - ASIOError asioError; - - stream->zeroOutput = true; - - asioError = ASIOStop(); - if( asioError == ASE_OK ) - { - EnsureCallbackHasCompleted( stream ); - } - else - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - } - - stream->isStopped = 1; - stream->isActive = 0; - - if( !stream->streamFinishedCallbackCalled ) - { - if( stream->streamRepresentation.streamFinishedCallback != 0 ) - stream->streamRepresentation.streamFinishedCallback( stream->streamRepresentation.userData ); - } - - return result; -} - - -static PaError IsStreamStopped( PaStream *s ) -{ - PaAsioStream *stream = (PaAsioStream*)s; - - return stream->isStopped; -} - - -static PaError IsStreamActive( PaStream *s ) -{ - PaAsioStream *stream = (PaAsioStream*)s; - - return stream->isActive; -} - - -static PaTime GetStreamTime( PaStream *s ) -{ - (void) s; /* unused parameter */ - - return (double)timeGetTime() * .001; -} - - -static double GetStreamCpuLoad( PaStream* s ) -{ - PaAsioStream *stream = (PaAsioStream*)s; - - return PaUtil_GetCpuLoad( &stream->cpuLoadMeasurer ); -} - - -/* - As separate stream interfaces are used for blocking and callback - streams, the following functions can be guaranteed to only be called - for blocking streams. -*/ - -static PaError ReadStream( PaStream *s , - void *buffer, - unsigned long frames ) -{ - PaError result = paNoError; /* Initial return value. */ - PaAsioStream *stream = (PaAsioStream*)s; /* The PA ASIO stream. */ - - /* Pointer to the blocking i/o data struct. */ - PaAsioStreamBlockingState *blockingState = stream->blockingState; - - /* Get blocking i/o buffer processor and ring buffer pointers. 
*/ - PaUtilBufferProcessor *pBp = &blockingState->bufferProcessor; - PaUtilRingBuffer *pRb = &blockingState->readRingBuffer; - - /* Ring buffer segment(s) used for writing. */ - void *pRingBufferData1st = NULL; /* First segment. (Mandatory) */ - void *pRingBufferData2nd = NULL; /* Second segment. (Optional) */ - - /* Number of frames per ring buffer segment. */ - long lRingBufferSize1st = 0; /* First segment. (Mandatory) */ - long lRingBufferSize2nd = 0; /* Second segment. (Optional) */ - - /* Get number of frames to be processed per data block. */ - unsigned long lFramesPerBlock = stream->bufferProcessor.framesPerUserBuffer; - /* Actual number of frames that has been copied into the ring buffer. */ - unsigned long lFramesCopied = 0; - /* The number of remaining unprocessed dtat frames. */ - unsigned long lFramesRemaining = frames; - - /* Copy the input argument to avoid pointer increment! */ - const void *userBuffer; - unsigned int i; /* Just a counter. */ - - /* About the time, needed to process 8 data blocks. */ - DWORD timeout = (DWORD)( 8 * lFramesPerBlock * 1000 / stream->streamRepresentation.streamInfo.sampleRate ); - DWORD waitResult = 0; - - - /* Check if the stream is still available ready to gather new data. */ - if( blockingState->stopFlag || !stream->isActive ) - { - PA_DEBUG(("Warning! Stream no longer available for reading in ReadStream()\n")); - result = paStreamIsStopped; - return result; - } - - /* If the stream is a input stream. */ - if( stream->inputChannelCount ) - { - /* Prepare buffer access. */ - if( !pBp->userOutputIsInterleaved ) - { - userBuffer = blockingState->readStreamBuffer; - for( i = 0; iinputChannelCount; ++i ) - { - ((void**)userBuffer)[i] = ((void**)buffer)[i]; - } - } /* Use the unchanged buffer. */ - else { userBuffer = buffer; } - - do /* Internal block processing for too large user data buffers. */ - { - /* Get the size of the current data block to be processed. */ - lFramesPerBlock =(lFramesPerBlock < lFramesRemaining) - ? lFramesPerBlock : lFramesRemaining; - /* Use predefined block size for as long there are enough - buffers available, thereafter reduce the processing block - size to match the number of remaining buffers. So the final - data block is processed although it may be incomplete. */ - - /* If the available amount of data frames is insufficient. */ - if( PaUtil_GetRingBufferReadAvailable(pRb) < (long) lFramesPerBlock ) - { - /* Make sure, the event isn't already set! */ - /* ResetEvent( blockingState->readFramesReadyEvent ); */ - - /* Set the number of requested buffers. */ - blockingState->readFramesRequested = lFramesPerBlock; - - /* Signalize that additional buffers are need. */ - blockingState->readFramesRequestedFlag = TRUE; - - /* Wait until requested number of buffers has been freed. */ - waitResult = WaitForSingleObject( blockingState->readFramesReadyEvent, timeout ); - - /* If something seriously went wrong... */ - if( waitResult == WAIT_FAILED ) - { - PA_DEBUG(("WaitForSingleObject() failed in ReadStream()\n")); - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - return result; - } - else if( waitResult == WAIT_TIMEOUT ) - { - PA_DEBUG(("WaitForSingleObject() timed out in ReadStream()\n")); - - /* If block processing has stopped, abort! */ - if( blockingState->stopFlag ) { return result = paStreamIsStopped; } - - /* If a timeout is encountered, give up eventually. */ - return result = paTimedOut; - } - } - /* Now, the ring buffer contains the required amount of data - frames. 
- (Therefor we don't need to check the return argument of - PaUtil_GetRingBufferReadRegions(). ;-) ) - */ - - /* Retrieve pointer(s) to the ring buffer's current write - position(s). If the first buffer segment is too small to - store the requested number of bytes, an additional second - segment is returned. Otherwise, i.e. if the first segment - is large enough, the second segment's pointer will be NULL. - */ - PaUtil_GetRingBufferReadRegions(pRb , - lFramesPerBlock , - &pRingBufferData1st, - &lRingBufferSize1st, - &pRingBufferData2nd, - &lRingBufferSize2nd); - - /* Set number of frames to be copied from the ring buffer. */ - PaUtil_SetInputFrameCount( pBp, lRingBufferSize1st ); - /* Setup ring buffer access. */ - PaUtil_SetInterleavedInputChannels(pBp , /* Buffer processor. */ - 0 , /* The first channel's index. */ - pRingBufferData1st, /* First ring buffer segment. */ - 0 ); /* Use all available channels. */ - - /* If a second ring buffer segment is required. */ - if( lRingBufferSize2nd ) { - /* Set number of frames to be copied from the ring buffer. */ - PaUtil_Set2ndInputFrameCount( pBp, lRingBufferSize2nd ); - /* Setup ring buffer access. */ - PaUtil_Set2ndInterleavedInputChannels(pBp , /* Buffer processor. */ - 0 , /* The first channel's index. */ - pRingBufferData2nd, /* Second ring buffer segment. */ - 0 ); /* Use all available channels. */ - } - - /* Let the buffer processor handle "copy and conversion" and - update the ring buffer indices manually. */ - lFramesCopied = PaUtil_CopyInput( pBp, &buffer, lFramesPerBlock ); - PaUtil_AdvanceRingBufferReadIndex( pRb, lFramesCopied ); - - /* Decrease number of unprocessed frames. */ - lFramesRemaining -= lFramesCopied; - - } /* Continue with the next data chunk. */ - while( lFramesRemaining ); - - - /* If there has been an input overflow within the callback */ - if( blockingState->inputOverflowFlag ) - { - blockingState->inputOverflowFlag = FALSE; - - /* Return the corresponding error code. */ - result = paInputOverflowed; - } - - } /* If this is not an input stream. */ - else { - result = paCanNotReadFromAnOutputOnlyStream; - } - - return result; -} - -static PaError WriteStream( PaStream *s , - const void *buffer, - unsigned long frames ) -{ - PaError result = paNoError; /* Initial return value. */ - PaAsioStream *stream = (PaAsioStream*)s; /* The PA ASIO stream. */ - - /* Pointer to the blocking i/o data struct. */ - PaAsioStreamBlockingState *blockingState = stream->blockingState; - - /* Get blocking i/o buffer processor and ring buffer pointers. */ - PaUtilBufferProcessor *pBp = &blockingState->bufferProcessor; - PaUtilRingBuffer *pRb = &blockingState->writeRingBuffer; - - /* Ring buffer segment(s) used for writing. */ - void *pRingBufferData1st = NULL; /* First segment. (Mandatory) */ - void *pRingBufferData2nd = NULL; /* Second segment. (Optional) */ - - /* Number of frames per ring buffer segment. */ - long lRingBufferSize1st = 0; /* First segment. (Mandatory) */ - long lRingBufferSize2nd = 0; /* Second segment. (Optional) */ - - /* Get number of frames to be processed per data block. */ - unsigned long lFramesPerBlock = stream->bufferProcessor.framesPerUserBuffer; - /* Actual number of frames that has been copied into the ring buffer. */ - unsigned long lFramesCopied = 0; - /* The number of remaining unprocessed dtat frames. */ - unsigned long lFramesRemaining = frames; - - /* About the time, needed to process 8 data blocks. 
*/ - DWORD timeout = (DWORD)( 8 * lFramesPerBlock * 1000 / stream->streamRepresentation.streamInfo.sampleRate ); - DWORD waitResult = 0; - - /* Copy the input argument to avoid pointer increment! */ - const void *userBuffer; - unsigned int i; /* Just a counter. */ - - - /* Check if the stream is still available ready to receive new data. */ - if( blockingState->stopFlag || !stream->isActive ) - { - PA_DEBUG(("Warning! Stream no longer available for writing in WriteStream()\n")); - result = paStreamIsStopped; - return result; - } - - /* If the stream is a output stream. */ - if( stream->outputChannelCount ) - { - /* Prepare buffer access. */ - if( !pBp->userOutputIsInterleaved ) - { - userBuffer = blockingState->writeStreamBuffer; - for( i = 0; ioutputChannelCount; ++i ) - { - ((const void**)userBuffer)[i] = ((const void**)buffer)[i]; - } - } /* Use the unchanged buffer. */ - else { userBuffer = buffer; } - - - do /* Internal block processing for too large user data buffers. */ - { - /* Get the size of the current data block to be processed. */ - lFramesPerBlock =(lFramesPerBlock < lFramesRemaining) - ? lFramesPerBlock : lFramesRemaining; - /* Use predefined block size for as long there are enough - frames available, thereafter reduce the processing block - size to match the number of remaining frames. So the final - data block is processed although it may be incomplete. */ - - /* If the available amount of buffers is insufficient. */ - if( PaUtil_GetRingBufferWriteAvailable(pRb) < (long) lFramesPerBlock ) - { - /* Make sure, the event isn't already set! */ - /* ResetEvent( blockingState->writeBuffersReadyEvent ); */ - - /* Set the number of requested buffers. */ - blockingState->writeBuffersRequested = lFramesPerBlock; - - /* Signalize that additional buffers are need. */ - blockingState->writeBuffersRequestedFlag = TRUE; - - /* Wait until requested number of buffers has been freed. */ - waitResult = WaitForSingleObject( blockingState->writeBuffersReadyEvent, timeout ); - - /* If something seriously went wrong... */ - if( waitResult == WAIT_FAILED ) - { - PA_DEBUG(("WaitForSingleObject() failed in WriteStream()\n")); - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_SYSTEM_ERROR( GetLastError() ); - return result; - } - else if( waitResult == WAIT_TIMEOUT ) - { - PA_DEBUG(("WaitForSingleObject() timed out in WriteStream()\n")); - - /* If block processing has stopped, abort! */ - if( blockingState->stopFlag ) { return result = paStreamIsStopped; } - - /* If a timeout is encountered, give up eventually. */ - return result = paTimedOut; - } - } - /* Now, the ring buffer contains the required amount of free - space to store the provided number of data frames. - (Therefor we don't need to check the return argument of - PaUtil_GetRingBufferWriteRegions(). ;-) ) - */ - - /* Retrieve pointer(s) to the ring buffer's current write - position(s). If the first buffer segment is too small to - store the requested number of bytes, an additional second - segment is returned. Otherwise, i.e. if the first segment - is large enough, the second segment's pointer will be NULL. - */ - PaUtil_GetRingBufferWriteRegions(pRb , - lFramesPerBlock , - &pRingBufferData1st, - &lRingBufferSize1st, - &pRingBufferData2nd, - &lRingBufferSize2nd); - - /* Set number of frames to be copied to the ring buffer. */ - PaUtil_SetOutputFrameCount( pBp, lRingBufferSize1st ); - /* Setup ring buffer access. */ - PaUtil_SetInterleavedOutputChannels(pBp , /* Buffer processor. */ - 0 , /* The first channel's index. 
*/ - pRingBufferData1st, /* First ring buffer segment. */ - 0 ); /* Use all available channels. */ - - /* If a second ring buffer segment is required. */ - if( lRingBufferSize2nd ) { - /* Set number of frames to be copied to the ring buffer. */ - PaUtil_Set2ndOutputFrameCount( pBp, lRingBufferSize2nd ); - /* Setup ring buffer access. */ - PaUtil_Set2ndInterleavedOutputChannels(pBp , /* Buffer processor. */ - 0 , /* The first channel's index. */ - pRingBufferData2nd, /* Second ring buffer segment. */ - 0 ); /* Use all available channels. */ - } - - /* Let the buffer processor handle "copy and conversion" and - update the ring buffer indices manually. */ - lFramesCopied = PaUtil_CopyOutput( pBp, &userBuffer, lFramesPerBlock ); - PaUtil_AdvanceRingBufferWriteIndex( pRb, lFramesCopied ); - - /* Decrease number of unprocessed frames. */ - lFramesRemaining -= lFramesCopied; - - } /* Continue with the next data chunk. */ - while( lFramesRemaining ); - - - /* If there has been an output underflow within the callback */ - if( blockingState->outputUnderflowFlag ) - { - blockingState->outputUnderflowFlag = FALSE; - - /* Return the corresponding error code. */ - result = paOutputUnderflowed; - } - - } /* If this is not an output stream. */ - else - { - result = paCanNotWriteToAnInputOnlyStream; - } - - return result; -} - - -static signed long GetStreamReadAvailable( PaStream* s ) -{ - PaAsioStream *stream = (PaAsioStream*)s; - - /* Call buffer utility routine to get the number of available frames. */ - return PaUtil_GetRingBufferReadAvailable( &stream->blockingState->readRingBuffer ); -} - - -static signed long GetStreamWriteAvailable( PaStream* s ) -{ - PaAsioStream *stream = (PaAsioStream*)s; - - /* Call buffer utility routine to get the number of empty buffers. */ - return PaUtil_GetRingBufferWriteAvailable( &stream->blockingState->writeRingBuffer ); -} - - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int BlockingIoPaCallback(const void *inputBuffer , - void *outputBuffer , - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo *timeInfo , - PaStreamCallbackFlags statusFlags , - void *userData ) -{ - PaError result = paNoError; /* Initial return value. */ - PaAsioStream *stream = *(PaAsioStream**)userData; /* The PA ASIO stream. */ - PaAsioStreamBlockingState *blockingState = stream->blockingState; /* Persume blockingState is valid, otherwise the callback wouldn't be running. */ - - /* Get a pointer to the stream's blocking i/o buffer processor. */ - PaUtilBufferProcessor *pBp = &blockingState->bufferProcessor; - PaUtilRingBuffer *pRb = NULL; - - /* If output data has been requested. */ - if( stream->outputChannelCount ) - { - /* If the callback input argument signalizes a output underflow, - make sure the WriteStream() function knows about it, too! */ - if( statusFlags & paOutputUnderflowed ) { - blockingState->outputUnderflowFlag = TRUE; - } - - /* Access the corresponding ring buffer. */ - pRb = &blockingState->writeRingBuffer; - - /* If the blocking i/o buffer contains enough output data, */ - if( PaUtil_GetRingBufferReadAvailable(pRb) >= (long) framesPerBuffer ) - { - /* Extract the requested data from the ring buffer. */ - PaUtil_ReadRingBuffer( pRb, outputBuffer, framesPerBuffer ); - } - else /* If no output data is available :-( */ - { - /* Signalize a write-buffer underflow. 
*/ - blockingState->outputUnderflowFlag = TRUE; - - /* Fill the output buffer with silence. */ - (*pBp->outputZeroer)( outputBuffer, 1, pBp->outputChannelCount * framesPerBuffer ); - - /* If playback is to be stopped */ - if( blockingState->stopFlag && PaUtil_GetRingBufferReadAvailable(pRb) < (long) framesPerBuffer ) - { - /* Extract all the remaining data from the ring buffer, - whether it is a complete data block or not. */ - PaUtil_ReadRingBuffer( pRb, outputBuffer, PaUtil_GetRingBufferReadAvailable(pRb) ); - } - } - - /* Set blocking i/o event? */ - if( blockingState->writeBuffersRequestedFlag && PaUtil_GetRingBufferWriteAvailable(pRb) >= (long) blockingState->writeBuffersRequested ) - { - /* Reset buffer request. */ - blockingState->writeBuffersRequestedFlag = FALSE; - blockingState->writeBuffersRequested = 0; - /* Signalize that requested buffers are ready. */ - SetEvent( blockingState->writeBuffersReadyEvent ); - /* What do we do if SetEvent() returns zero, i.e. the event - could not be set? How to return errors from within the - callback? - S.Fischer */ - } - } - - /* If input data has been supplied. */ - if( stream->inputChannelCount ) - { - /* If the callback input argument signalizes a input overflow, - make sure the ReadStream() function knows about it, too! */ - if( statusFlags & paInputOverflowed ) { - blockingState->inputOverflowFlag = TRUE; - } - - /* Access the corresponding ring buffer. */ - pRb = &blockingState->readRingBuffer; - - /* If the blocking i/o buffer contains not enough input buffers */ - if( PaUtil_GetRingBufferWriteAvailable(pRb) < (long) framesPerBuffer ) - { - /* Signalize a read-buffer overflow. */ - blockingState->inputOverflowFlag = TRUE; - - /* Remove some old data frames from the buffer. */ - PaUtil_AdvanceRingBufferReadIndex( pRb, framesPerBuffer ); - } - - /* Insert the current input data into the ring buffer. */ - PaUtil_WriteRingBuffer( pRb, inputBuffer, framesPerBuffer ); - - /* Set blocking i/o event? */ - if( blockingState->readFramesRequestedFlag && PaUtil_GetRingBufferReadAvailable(pRb) >= (long) blockingState->readFramesRequested ) - { - /* Reset buffer request. */ - blockingState->readFramesRequestedFlag = FALSE; - blockingState->readFramesRequested = 0; - /* Signalize that requested buffers are ready. */ - SetEvent( blockingState->readFramesReadyEvent ); - /* What do we do if SetEvent() returns zero, i.e. the event - could not be set? How to return errors from within the - callback? 
- S.Fischer */ - /** @todo report an error with PA_DEBUG */ - } - } - - return paContinue; -} - - -PaError PaAsio_ShowControlPanel( PaDeviceIndex device, void* systemSpecific ) -{ - PaError result = paNoError; - PaUtilHostApiRepresentation *hostApi; - PaDeviceIndex hostApiDevice; - ASIODriverInfo asioDriverInfo; - ASIOError asioError; - int asioIsInitialized = 0; - PaAsioHostApiRepresentation *asioHostApi; - PaAsioDeviceInfo *asioDeviceInfo; - PaWinUtilComInitializationResult comInitializationResult; - - /* initialize COM again here, we might be in another thread */ - result = PaWinUtil_CoInitialize( paASIO, &comInitializationResult ); - if( result != paNoError ) - return result; - - result = PaUtil_GetHostApiRepresentation( &hostApi, paASIO ); - if( result != paNoError ) - goto error; - - result = PaUtil_DeviceIndexToHostApiDeviceIndex( &hostApiDevice, device, hostApi ); - if( result != paNoError ) - goto error; - - /* - In theory we could proceed if the currently open device was the same - one for which the control panel was requested, however because the - window pointer is not available until this function is called we - currently need to call ASIOInit() again here, which of course can't be - done safely while a stream is open. - */ - - asioHostApi = (PaAsioHostApiRepresentation*)hostApi; - if( asioHostApi->openAsioDeviceIndex != paNoDevice ) - { - result = paDeviceUnavailable; - goto error; - } - - asioDeviceInfo = (PaAsioDeviceInfo*)hostApi->deviceInfos[hostApiDevice]; - - if( !asioHostApi->asioDrivers->loadDriver( const_cast(asioDeviceInfo->commonDeviceInfo.name) ) ) - { - result = paUnanticipatedHostError; - goto error; - } - - /* CRUCIAL!!! */ - memset( &asioDriverInfo, 0, sizeof(ASIODriverInfo) ); - asioDriverInfo.asioVersion = 2; - asioDriverInfo.sysRef = systemSpecific; - asioError = ASIOInit( &asioDriverInfo ); - if( asioError != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - goto error; - } - else - { - asioIsInitialized = 1; - } - -PA_DEBUG(("PaAsio_ShowControlPanel: ASIOInit(): %s\n", PaAsio_GetAsioErrorText(asioError) )); -PA_DEBUG(("asioVersion: ASIOInit(): %ld\n", asioDriverInfo.asioVersion )); -PA_DEBUG(("driverVersion: ASIOInit(): %ld\n", asioDriverInfo.driverVersion )); -PA_DEBUG(("Name: ASIOInit(): %s\n", asioDriverInfo.name )); -PA_DEBUG(("ErrorMessage: ASIOInit(): %s\n", asioDriverInfo.errorMessage )); - - asioError = ASIOControlPanel(); - if( asioError != ASE_OK ) - { - PA_DEBUG(("PaAsio_ShowControlPanel: ASIOControlPanel(): %s\n", PaAsio_GetAsioErrorText(asioError) )); - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - goto error; - } - -PA_DEBUG(("PaAsio_ShowControlPanel: ASIOControlPanel(): %s\n", PaAsio_GetAsioErrorText(asioError) )); - - asioError = ASIOExit(); - if( asioError != ASE_OK ) - { - result = paUnanticipatedHostError; - PA_ASIO_SET_LAST_ASIO_ERROR( asioError ); - asioIsInitialized = 0; - goto error; - } - -PA_DEBUG(("PaAsio_ShowControlPanel: ASIOExit(): %s\n", PaAsio_GetAsioErrorText(asioError) )); - - return result; - -error: - if( asioIsInitialized ) - { - ASIOExit(); - } - - PaWinUtil_CoUninitialize( paASIO, &comInitializationResult ); - - return result; -} - - -PaError PaAsio_GetInputChannelName( PaDeviceIndex device, int channelIndex, - const char** channelName ) -{ - PaError result = paNoError; - PaUtilHostApiRepresentation *hostApi; - PaDeviceIndex hostApiDevice; - PaAsioDeviceInfo *asioDeviceInfo; - - - result = PaUtil_GetHostApiRepresentation( &hostApi, paASIO 
); - if( result != paNoError ) - goto error; - - result = PaUtil_DeviceIndexToHostApiDeviceIndex( &hostApiDevice, device, hostApi ); - if( result != paNoError ) - goto error; - - asioDeviceInfo = (PaAsioDeviceInfo*)hostApi->deviceInfos[hostApiDevice]; - - if( channelIndex < 0 || channelIndex >= asioDeviceInfo->commonDeviceInfo.maxInputChannels ){ - result = paInvalidChannelCount; - goto error; - } - - *channelName = asioDeviceInfo->asioChannelInfos[channelIndex].name; - - return paNoError; - -error: - return result; -} - - -PaError PaAsio_GetOutputChannelName( PaDeviceIndex device, int channelIndex, - const char** channelName ) -{ - PaError result = paNoError; - PaUtilHostApiRepresentation *hostApi; - PaDeviceIndex hostApiDevice; - PaAsioDeviceInfo *asioDeviceInfo; - - - result = PaUtil_GetHostApiRepresentation( &hostApi, paASIO ); - if( result != paNoError ) - goto error; - - result = PaUtil_DeviceIndexToHostApiDeviceIndex( &hostApiDevice, device, hostApi ); - if( result != paNoError ) - goto error; - - asioDeviceInfo = (PaAsioDeviceInfo*)hostApi->deviceInfos[hostApiDevice]; - - if( channelIndex < 0 || channelIndex >= asioDeviceInfo->commonDeviceInfo.maxOutputChannels ){ - result = paInvalidChannelCount; - goto error; - } - - *channelName = asioDeviceInfo->asioChannelInfos[ - asioDeviceInfo->commonDeviceInfo.maxInputChannels + channelIndex].name; - - return paNoError; - -error: - return result; -} - - -/* NOTE: the following functions are ASIO-stream specific, and are called directly - by client code. We need to check for many more error conditions here because - we don't have the benefit of pa_front.c's parameter checking. -*/ - -static PaError GetAsioStreamPointer( PaAsioStream **stream, PaStream *s ) -{ - PaError result; - PaUtilHostApiRepresentation *hostApi; - PaAsioHostApiRepresentation *asioHostApi; - - result = PaUtil_ValidateStreamPointer( s ); - if( result != paNoError ) - return result; - - result = PaUtil_GetHostApiRepresentation( &hostApi, paASIO ); - if( result != paNoError ) - return result; - - asioHostApi = (PaAsioHostApiRepresentation*)hostApi; - - if( PA_STREAM_REP( s )->streamInterface == &asioHostApi->callbackStreamInterface - || PA_STREAM_REP( s )->streamInterface == &asioHostApi->blockingStreamInterface ) - { - /* s is an ASIO stream */ - *stream = (PaAsioStream *)s; - return paNoError; - } - else - { - return paIncompatibleStreamHostApi; - } -} - - -PaError PaAsio_SetStreamSampleRate( PaStream* s, double sampleRate ) -{ - PaAsioStream *stream; - PaError result = GetAsioStreamPointer( &stream, s ); - if( result != paNoError ) - return result; - - if( stream != theAsioStream ) - return paBadStreamPtr; - - return ValidateAndSetSampleRate( sampleRate ); -} diff --git a/spaces/amsterdamNLP/attention-rollout/lib/NewExplanationGenerator.py b/spaces/amsterdamNLP/attention-rollout/lib/NewExplanationGenerator.py deleted file mode 100644 index dac25794fb65e0244af0ead26b7ea8dee08e1a2a..0000000000000000000000000000000000000000 --- a/spaces/amsterdamNLP/attention-rollout/lib/NewExplanationGenerator.py +++ /dev/null @@ -1,145 +0,0 @@ -import argparse -import numpy as np -import torch -import glob - -from captum._utils.common import _get_module_from_name - -# compute rollout between attention layers -def compute_rollout_attention(all_layer_matrices, start_layer=0): - # adding residual consideration- code adapted from https://github.com/samiraabnar/attention_flow - num_tokens = all_layer_matrices[0].shape[1] - batch_size = all_layer_matrices[0].shape[0] - eye = 
torch.eye(num_tokens).expand(batch_size, num_tokens, num_tokens).to(all_layer_matrices[0].device) - all_layer_matrices = [all_layer_matrices[i] + eye for i in range(len(all_layer_matrices))] - matrices_aug = [all_layer_matrices[i] / all_layer_matrices[i].sum(dim=-1, keepdim=True) - for i in range(len(all_layer_matrices))] - joint_attention = matrices_aug[start_layer] - for i in range(start_layer+1, len(matrices_aug)): - joint_attention = matrices_aug[i].bmm(joint_attention) - return joint_attention - -class Generator: - def __init__(self, model, key="bert.encoder.layer"): - self.model = model - self.key = key - self.model.eval() - - def forward(self, input_ids, attention_mask): - return self.model(input_ids, attention_mask) - - def _build_one_hot(self, output, index): - if index == None: - index = np.argmax(output.cpu().data.numpy(), axis=-1) - - one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32) - one_hot[0, index] = 1 - one_hot_vector = one_hot - one_hot = torch.from_numpy(one_hot).requires_grad_(True).to(output.device) - one_hot = torch.sum(one_hot * output) - - return one_hot, one_hot_vector - - def generate_LRP(self, input_ids, attention_mask, - index=None, start_layer=11): - output = self.model(input_ids=input_ids, attention_mask=attention_mask)[0] - kwargs = {"alpha": 1} - - one_hot, one_hot_vector = self._build_one_hot(output, index) - self.model.zero_grad() - one_hot.backward(retain_graph=True) - - self.model.relprop(torch.tensor(one_hot_vector).to(input_ids.device), **kwargs) - - cams = [] - blocks = _get_module_from_name(self.model, self.key) - for blk in blocks: - grad = blk.attention.self.get_attn_gradients() - cam = blk.attention.self.get_attn_cam() - cam = cam[0].reshape(-1, cam.shape[-1], cam.shape[-1]) - grad = grad[0].reshape(-1, grad.shape[-1], grad.shape[-1]) - cam = grad * cam - cam = cam.clamp(min=0).mean(dim=0) - cams.append(cam.unsqueeze(0)) - rollout = compute_rollout_attention(cams, start_layer=start_layer) - rollout[:, 0, 0] = rollout[:, 0].min() - return rollout[:, 0] - - def generate_LRP_last_layer(self, input_ids, attention_mask, - index=None): - output = self.model(input_ids=input_ids, attention_mask=attention_mask)[0] - kwargs = {"alpha": 1} - - one_hot, one_hot_vector = self._build_one_hot(output, index) - - self.model.zero_grad() - one_hot.backward(retain_graph=True) - - self.model.relprop(torch.tensor(one_hot_vector).to(input_ids.device), **kwargs) - - cam = _get_module_from_name(self.model, self.key)[-1].attention.self.get_attn_cam()[0] - cam = cam.clamp(min=0).mean(dim=0).unsqueeze(0) - cam[:, 0, 0] = 0 - return cam[:, 0] - - def generate_full_lrp(self, input_ids, attention_mask, - index=None): - output = self.model(input_ids=input_ids, attention_mask=attention_mask)[0] - kwargs = {"alpha": 1} - - one_hot, one_hot_vector = self._build_one_hot(output, index) - - self.model.zero_grad() - one_hot.backward(retain_graph=True) - - cam = self.model.relprop(torch.tensor(one_hot_vector).to(input_ids.device), **kwargs) - cam = cam.sum(dim=2) - cam[:, 0] = 0 - return cam - - def generate_attn_last_layer(self, input_ids, attention_mask, - index=None): - output = self.model(input_ids=input_ids, attention_mask=attention_mask)[0] - cam = _get_module_from_name(self.model, self.key)[-1].attention.self.get_attn()[0] - cam = cam.mean(dim=0).unsqueeze(0) - cam[:, 0, 0] = 0 - return cam[:, 0] - - def generate_rollout(self, input_ids, attention_mask, start_layer=0, index=None): - self.model.zero_grad() - output = self.model(input_ids=input_ids, 
attention_mask=attention_mask)[0] - blocks = _get_module_from_name(self.model, self.key) - all_layer_attentions = [] - for blk in blocks: - attn_heads = blk.attention.self.get_attn() - avg_heads = (attn_heads.sum(dim=1) / attn_heads.shape[1]).detach() - all_layer_attentions.append(avg_heads) - rollout = compute_rollout_attention(all_layer_attentions, start_layer=start_layer) - rollout[:, 0, 0] = 0 - return rollout[:, 0] - - def generate_attn_gradcam(self, input_ids, attention_mask, index=None): - output = self.model(input_ids=input_ids, attention_mask=attention_mask)[0] - kwargs = {"alpha": 1} - - if index == None: - index = np.argmax(output.cpu().data.numpy(), axis=-1) - - one_hot, one_hot_vector = self._build_one_hot(output, index) - - self.model.zero_grad() - one_hot.backward(retain_graph=True) - - self.model.relprop(torch.tensor(one_hot_vector).to(input_ids.device), **kwargs) - - cam = _get_module_from_name(self.model, self.key)[-1].attention.self.get_attn() - grad = _get_module_from_name(self.model, self.key)[-1].attention.self.get_attn_gradients() - - cam = cam[0].reshape(-1, cam.shape[-1], cam.shape[-1]) - grad = grad[0].reshape(-1, grad.shape[-1], grad.shape[-1]) - grad = grad.mean(dim=[1, 2], keepdim=True) - cam = (cam * grad).mean(0).clamp(min=0).unsqueeze(0) - cam = (cam - cam.min()) / (cam.max() - cam.min()) - cam[:, 0, 0] = 0 - return cam[:, 0] - diff --git a/spaces/arnavkartikeya/SCRIPture-final/eval_retrieval_video.py b/spaces/arnavkartikeya/SCRIPture-final/eval_retrieval_video.py deleted file mode 100644 index 07ebab7f41f6466f6f46130002e2e0df1266486a..0000000000000000000000000000000000000000 --- a/spaces/arnavkartikeya/SCRIPture-final/eval_retrieval_video.py +++ /dev/null @@ -1,250 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. 
- * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li -''' -import argparse -import os -import ruamel_yaml as yaml -import numpy as np -import random -import time -import datetime -import json -from pathlib import Path - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.backends.cudnn as cudnn -import torch.distributed as dist -from torch.utils.data import DataLoader - -from models.blip_retrieval import blip_retrieval -import utils -from data.video_dataset import VideoDataset - - -@torch.no_grad() -def evaluation(model, data_loader, tokenizer, device, config): - # test - model.eval() - - metric_logger = utils.MetricLogger(delimiter=" ") - header = 'Evaluation:' - - print('Computing features for evaluation...') - start_time = time.time() - - texts = data_loader.dataset.text - num_text = len(texts) - text_bs = 256 - text_ids = [] - text_embeds = [] - text_atts = [] - for i in range(0, num_text, text_bs): - text = texts[i: min(num_text, i+text_bs)] - text_input = tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(device) - text_output = model.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text') - text_embed = F.normalize(model.text_proj(text_output.last_hidden_state[:,0,:])) - text_embeds.append(text_embed) - text_ids.append(text_input.input_ids) - text_atts.append(text_input.attention_mask) - - text_embeds = torch.cat(text_embeds,dim=0) - text_ids = torch.cat(text_ids,dim=0) - text_atts = torch.cat(text_atts,dim=0) - text_ids[:,0] = tokenizer.additional_special_tokens_ids[0] - - video_feats = [] - video_embeds = [] - for video, video_id in data_loader: - - B,N,C,W,H = video.size() - video = video.view(-1,C,W,H) - video = video.to(device,non_blocking=True) - video_feat = model.visual_encoder(video) - video_embed = model.vision_proj(video_feat[:,0,:]) - video_embed = video_embed.view(B,N,-1).mean(dim=1) - video_embed = F.normalize(video_embed,dim=-1) - - video_feat = video_feat.view(B,-1,video_feat.shape[-1]) - video_feats.append(video_feat.cpu()) - video_embeds.append(video_embed) - - video_feats = torch.cat(video_feats,dim=0) - video_embeds = torch.cat(video_embeds,dim=0) - - sims_matrix = video_embeds @ text_embeds.t() - score_matrix_v2t = torch.full((len(texts),len(texts)),-100.0).to(device) - - num_tasks = utils.get_world_size() - rank = utils.get_rank() - step = sims_matrix.size(0)//num_tasks + 1 - start = rank*step - end = min(sims_matrix.size(0),start+step) - - for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)): - topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0) - - encoder_output = video_feats[start+i].repeat(config['k_test'],1,1).to(device,non_blocking=True) - encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device,non_blocking=True) - output = model.text_encoder(text_ids[topk_idx], - attention_mask = text_atts[topk_idx], - encoder_hidden_states = encoder_output, - encoder_attention_mask = encoder_att, - return_dict = True, - ) - score = model.itm_head(output.last_hidden_state[:,0,:])[:,1] - score_matrix_v2t[start+i,topk_idx] = score + topk_sim - - sims_matrix = sims_matrix.t() - score_matrix_t2v = torch.full((len(texts),len(texts)),-100.0).to(device) - - step = sims_matrix.size(0)//num_tasks + 1 - start = rank*step - end = min(sims_matrix.size(0),start+step) - - for i,sims in 
enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)): - - topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0) - encoder_output = video_feats[topk_idx].to(device,non_blocking=True) - encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device,non_blocking=True) - output = model.text_encoder(text_ids[start+i].repeat(config['k_test'],1), - attention_mask = text_atts[start+i].repeat(config['k_test'],1), - encoder_hidden_states = encoder_output, - encoder_attention_mask = encoder_att, - return_dict = True, - ) - score = model.itm_head(output.last_hidden_state[:,0,:])[:,1] - score_matrix_t2v[start+i,topk_idx] = score + topk_sim - - if args.distributed: - dist.barrier() - torch.distributed.all_reduce(score_matrix_v2t, op=torch.distributed.ReduceOp.SUM) - torch.distributed.all_reduce(score_matrix_t2v, op=torch.distributed.ReduceOp.SUM) - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('Evaluation time {}'.format(total_time_str)) - - return score_matrix_v2t.cpu().numpy(), score_matrix_t2v.cpu().numpy() - - - -@torch.no_grad() -def itm_eval(scores_v2t, scores_t2v, txt2vmg, vid2txt): - - #Video->Text - ranks = np.zeros(scores_v2t.shape[0]) - for index,score in enumerate(scores_v2t): - inds = np.argsort(score)[::-1] - ranks[index] = np.where(inds == vid2txt[index])[0][0] - - # Compute metrics - tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) - tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) - tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) - - #Text->Video - ranks = np.zeros(scores_t2v.shape[0]) - - for index,score in enumerate(scores_t2v): - inds = np.argsort(score)[::-1] - ranks[index] = np.where(inds == txt2vmg[index])[0][0] - - mdR = np.median(ranks+1) - - # Compute metrics - vr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) - vr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) - vr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) - - tr_mean = (tr1 + tr5 + tr10) / 3 - vr_mean = (vr1 + vr5 + vr10) / 3 - r_mean = (tr_mean + vr_mean) / 2 - - eval_result = {'txt_r1': tr1, - 'txt_r5': tr5, - 'txt_r10': tr10, - 'txt_r_mean': tr_mean, - 'vid_r1': vr1, - 'vid_r5': vr5, - 'vid_r10': vr10, - 'vid_r_mean': vr_mean, - 'vid_mdR': mdR, - 'r_mean': r_mean} - return eval_result - - - - -def main(args, config): - utils.init_distributed_mode(args) - - device = torch.device(args.device) - - # fix the seed for reproducibility - seed = args.seed + utils.get_rank() - torch.manual_seed(seed) - np.random.seed(seed) - random.seed(seed) - cudnn.benchmark = True - - #### Dataset #### - print("Creating retrieval dataset") - test_dataset = VideoDataset(config['video_root'],config['ann_root'],num_frm=config['num_frm_test'], - max_img_size=config['image_size'], frm_sampling_strategy='uniform') - - test_loader = DataLoader( - test_dataset, - batch_size=config['batch_size'], - num_workers=4, - pin_memory=True, - drop_last=False, - shuffle=False, - ) - - #### Model #### - print("Creating model") - model = blip_retrieval(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit']) - - model = model.to(device) - - model_without_ddp = model - if args.distributed: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) - model_without_ddp = model.module - - score_v2t, score_t2v, = evaluation(model_without_ddp, test_loader, model_without_ddp.tokenizer, device, config) - - if utils.is_main_process(): - - test_result = itm_eval(score_v2t, 
score_t2v, test_loader.dataset.txt2video, test_loader.dataset.video2txt) - print(test_result) - - log_stats = {**{f'{k}': v for k, v in test_result.items()},} - with open(os.path.join(args.output_dir, "test_result.txt"),"a") as f: - f.write(json.dumps(log_stats) + "\n") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--config', default='./configs/retrieval_msrvtt.yaml') - parser.add_argument('--output_dir', default='output/Retrieval_msrvtt') - parser.add_argument('--device', default='cuda') - parser.add_argument('--seed', default=42, type=int) - parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') - parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') - parser.add_argument('--distributed', default=True, type=bool) - args = parser.parse_args() - - config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader) - - Path(args.output_dir).mkdir(parents=True, exist_ok=True) - - yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w')) - - main(args, config) \ No newline at end of file diff --git a/spaces/artificialguybr/qwen-vl/README.md b/spaces/artificialguybr/qwen-vl/README.md deleted file mode 100644 index fdc516e475fe10dcdd385a3a69a5528271331034..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/qwen-vl/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Qwen VL -emoji: ⚡ -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/generic/__init__.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/generic/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/tracing.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/tracing.py deleted file mode 100644 index d5596a4ceab79aff362203376952edc3122bf811..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/tracing.py +++ /dev/null @@ -1,472 +0,0 @@ -from types import SimpleNamespace -from typing import TYPE_CHECKING, Awaitable, Optional, Type, TypeVar - -import attr -from aiosignal import Signal -from multidict import CIMultiDict -from yarl import URL - -from .client_reqrep import ClientResponse - -if TYPE_CHECKING: # pragma: no cover - from .client import ClientSession - from .typedefs import Protocol - - _ParamT_contra = TypeVar("_ParamT_contra", contravariant=True) - - class _SignalCallback(Protocol[_ParamT_contra]): - def __call__( - self, - __client_session: ClientSession, - __trace_config_ctx: SimpleNamespace, - __params: _ParamT_contra, - ) -> Awaitable[None]: - ... 
- - -__all__ = ( - "TraceConfig", - "TraceRequestStartParams", - "TraceRequestEndParams", - "TraceRequestExceptionParams", - "TraceConnectionQueuedStartParams", - "TraceConnectionQueuedEndParams", - "TraceConnectionCreateStartParams", - "TraceConnectionCreateEndParams", - "TraceConnectionReuseconnParams", - "TraceDnsResolveHostStartParams", - "TraceDnsResolveHostEndParams", - "TraceDnsCacheHitParams", - "TraceDnsCacheMissParams", - "TraceRequestRedirectParams", - "TraceRequestChunkSentParams", - "TraceResponseChunkReceivedParams", - "TraceRequestHeadersSentParams", -) - - -class TraceConfig: - """First-class used to trace requests launched via ClientSession objects.""" - - def __init__( - self, trace_config_ctx_factory: Type[SimpleNamespace] = SimpleNamespace - ) -> None: - self._on_request_start: Signal[ - _SignalCallback[TraceRequestStartParams] - ] = Signal(self) - self._on_request_chunk_sent: Signal[ - _SignalCallback[TraceRequestChunkSentParams] - ] = Signal(self) - self._on_response_chunk_received: Signal[ - _SignalCallback[TraceResponseChunkReceivedParams] - ] = Signal(self) - self._on_request_end: Signal[_SignalCallback[TraceRequestEndParams]] = Signal( - self - ) - self._on_request_exception: Signal[ - _SignalCallback[TraceRequestExceptionParams] - ] = Signal(self) - self._on_request_redirect: Signal[ - _SignalCallback[TraceRequestRedirectParams] - ] = Signal(self) - self._on_connection_queued_start: Signal[ - _SignalCallback[TraceConnectionQueuedStartParams] - ] = Signal(self) - self._on_connection_queued_end: Signal[ - _SignalCallback[TraceConnectionQueuedEndParams] - ] = Signal(self) - self._on_connection_create_start: Signal[ - _SignalCallback[TraceConnectionCreateStartParams] - ] = Signal(self) - self._on_connection_create_end: Signal[ - _SignalCallback[TraceConnectionCreateEndParams] - ] = Signal(self) - self._on_connection_reuseconn: Signal[ - _SignalCallback[TraceConnectionReuseconnParams] - ] = Signal(self) - self._on_dns_resolvehost_start: Signal[ - _SignalCallback[TraceDnsResolveHostStartParams] - ] = Signal(self) - self._on_dns_resolvehost_end: Signal[ - _SignalCallback[TraceDnsResolveHostEndParams] - ] = Signal(self) - self._on_dns_cache_hit: Signal[ - _SignalCallback[TraceDnsCacheHitParams] - ] = Signal(self) - self._on_dns_cache_miss: Signal[ - _SignalCallback[TraceDnsCacheMissParams] - ] = Signal(self) - self._on_request_headers_sent: Signal[ - _SignalCallback[TraceRequestHeadersSentParams] - ] = Signal(self) - - self._trace_config_ctx_factory = trace_config_ctx_factory - - def trace_config_ctx( - self, trace_request_ctx: Optional[SimpleNamespace] = None - ) -> SimpleNamespace: - """Return a new trace_config_ctx instance""" - return self._trace_config_ctx_factory(trace_request_ctx=trace_request_ctx) - - def freeze(self) -> None: - self._on_request_start.freeze() - self._on_request_chunk_sent.freeze() - self._on_response_chunk_received.freeze() - self._on_request_end.freeze() - self._on_request_exception.freeze() - self._on_request_redirect.freeze() - self._on_connection_queued_start.freeze() - self._on_connection_queued_end.freeze() - self._on_connection_create_start.freeze() - self._on_connection_create_end.freeze() - self._on_connection_reuseconn.freeze() - self._on_dns_resolvehost_start.freeze() - self._on_dns_resolvehost_end.freeze() - self._on_dns_cache_hit.freeze() - self._on_dns_cache_miss.freeze() - self._on_request_headers_sent.freeze() - - @property - def on_request_start(self) -> "Signal[_SignalCallback[TraceRequestStartParams]]": - return 
self._on_request_start - - @property - def on_request_chunk_sent( - self, - ) -> "Signal[_SignalCallback[TraceRequestChunkSentParams]]": - return self._on_request_chunk_sent - - @property - def on_response_chunk_received( - self, - ) -> "Signal[_SignalCallback[TraceResponseChunkReceivedParams]]": - return self._on_response_chunk_received - - @property - def on_request_end(self) -> "Signal[_SignalCallback[TraceRequestEndParams]]": - return self._on_request_end - - @property - def on_request_exception( - self, - ) -> "Signal[_SignalCallback[TraceRequestExceptionParams]]": - return self._on_request_exception - - @property - def on_request_redirect( - self, - ) -> "Signal[_SignalCallback[TraceRequestRedirectParams]]": - return self._on_request_redirect - - @property - def on_connection_queued_start( - self, - ) -> "Signal[_SignalCallback[TraceConnectionQueuedStartParams]]": - return self._on_connection_queued_start - - @property - def on_connection_queued_end( - self, - ) -> "Signal[_SignalCallback[TraceConnectionQueuedEndParams]]": - return self._on_connection_queued_end - - @property - def on_connection_create_start( - self, - ) -> "Signal[_SignalCallback[TraceConnectionCreateStartParams]]": - return self._on_connection_create_start - - @property - def on_connection_create_end( - self, - ) -> "Signal[_SignalCallback[TraceConnectionCreateEndParams]]": - return self._on_connection_create_end - - @property - def on_connection_reuseconn( - self, - ) -> "Signal[_SignalCallback[TraceConnectionReuseconnParams]]": - return self._on_connection_reuseconn - - @property - def on_dns_resolvehost_start( - self, - ) -> "Signal[_SignalCallback[TraceDnsResolveHostStartParams]]": - return self._on_dns_resolvehost_start - - @property - def on_dns_resolvehost_end( - self, - ) -> "Signal[_SignalCallback[TraceDnsResolveHostEndParams]]": - return self._on_dns_resolvehost_end - - @property - def on_dns_cache_hit(self) -> "Signal[_SignalCallback[TraceDnsCacheHitParams]]": - return self._on_dns_cache_hit - - @property - def on_dns_cache_miss(self) -> "Signal[_SignalCallback[TraceDnsCacheMissParams]]": - return self._on_dns_cache_miss - - @property - def on_request_headers_sent( - self, - ) -> "Signal[_SignalCallback[TraceRequestHeadersSentParams]]": - return self._on_request_headers_sent - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceRequestStartParams: - """Parameters sent by the `on_request_start` signal""" - - method: str - url: URL - headers: "CIMultiDict[str]" - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceRequestChunkSentParams: - """Parameters sent by the `on_request_chunk_sent` signal""" - - method: str - url: URL - chunk: bytes - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceResponseChunkReceivedParams: - """Parameters sent by the `on_response_chunk_received` signal""" - - method: str - url: URL - chunk: bytes - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceRequestEndParams: - """Parameters sent by the `on_request_end` signal""" - - method: str - url: URL - headers: "CIMultiDict[str]" - response: ClientResponse - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceRequestExceptionParams: - """Parameters sent by the `on_request_exception` signal""" - - method: str - url: URL - headers: "CIMultiDict[str]" - exception: BaseException - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceRequestRedirectParams: - """Parameters sent by the `on_request_redirect` signal""" - - method: str - url: URL - 
headers: "CIMultiDict[str]" - response: ClientResponse - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceConnectionQueuedStartParams: - """Parameters sent by the `on_connection_queued_start` signal""" - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceConnectionQueuedEndParams: - """Parameters sent by the `on_connection_queued_end` signal""" - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceConnectionCreateStartParams: - """Parameters sent by the `on_connection_create_start` signal""" - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceConnectionCreateEndParams: - """Parameters sent by the `on_connection_create_end` signal""" - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceConnectionReuseconnParams: - """Parameters sent by the `on_connection_reuseconn` signal""" - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceDnsResolveHostStartParams: - """Parameters sent by the `on_dns_resolvehost_start` signal""" - - host: str - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceDnsResolveHostEndParams: - """Parameters sent by the `on_dns_resolvehost_end` signal""" - - host: str - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceDnsCacheHitParams: - """Parameters sent by the `on_dns_cache_hit` signal""" - - host: str - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceDnsCacheMissParams: - """Parameters sent by the `on_dns_cache_miss` signal""" - - host: str - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class TraceRequestHeadersSentParams: - """Parameters sent by the `on_request_headers_sent` signal""" - - method: str - url: URL - headers: "CIMultiDict[str]" - - -class Trace: - """Internal dependency holder class. - - Used to keep together the main dependencies used - at the moment of send a signal. 
- """ - - def __init__( - self, - session: "ClientSession", - trace_config: TraceConfig, - trace_config_ctx: SimpleNamespace, - ) -> None: - self._trace_config = trace_config - self._trace_config_ctx = trace_config_ctx - self._session = session - - async def send_request_start( - self, method: str, url: URL, headers: "CIMultiDict[str]" - ) -> None: - return await self._trace_config.on_request_start.send( - self._session, - self._trace_config_ctx, - TraceRequestStartParams(method, url, headers), - ) - - async def send_request_chunk_sent( - self, method: str, url: URL, chunk: bytes - ) -> None: - return await self._trace_config.on_request_chunk_sent.send( - self._session, - self._trace_config_ctx, - TraceRequestChunkSentParams(method, url, chunk), - ) - - async def send_response_chunk_received( - self, method: str, url: URL, chunk: bytes - ) -> None: - return await self._trace_config.on_response_chunk_received.send( - self._session, - self._trace_config_ctx, - TraceResponseChunkReceivedParams(method, url, chunk), - ) - - async def send_request_end( - self, - method: str, - url: URL, - headers: "CIMultiDict[str]", - response: ClientResponse, - ) -> None: - return await self._trace_config.on_request_end.send( - self._session, - self._trace_config_ctx, - TraceRequestEndParams(method, url, headers, response), - ) - - async def send_request_exception( - self, - method: str, - url: URL, - headers: "CIMultiDict[str]", - exception: BaseException, - ) -> None: - return await self._trace_config.on_request_exception.send( - self._session, - self._trace_config_ctx, - TraceRequestExceptionParams(method, url, headers, exception), - ) - - async def send_request_redirect( - self, - method: str, - url: URL, - headers: "CIMultiDict[str]", - response: ClientResponse, - ) -> None: - return await self._trace_config._on_request_redirect.send( - self._session, - self._trace_config_ctx, - TraceRequestRedirectParams(method, url, headers, response), - ) - - async def send_connection_queued_start(self) -> None: - return await self._trace_config.on_connection_queued_start.send( - self._session, self._trace_config_ctx, TraceConnectionQueuedStartParams() - ) - - async def send_connection_queued_end(self) -> None: - return await self._trace_config.on_connection_queued_end.send( - self._session, self._trace_config_ctx, TraceConnectionQueuedEndParams() - ) - - async def send_connection_create_start(self) -> None: - return await self._trace_config.on_connection_create_start.send( - self._session, self._trace_config_ctx, TraceConnectionCreateStartParams() - ) - - async def send_connection_create_end(self) -> None: - return await self._trace_config.on_connection_create_end.send( - self._session, self._trace_config_ctx, TraceConnectionCreateEndParams() - ) - - async def send_connection_reuseconn(self) -> None: - return await self._trace_config.on_connection_reuseconn.send( - self._session, self._trace_config_ctx, TraceConnectionReuseconnParams() - ) - - async def send_dns_resolvehost_start(self, host: str) -> None: - return await self._trace_config.on_dns_resolvehost_start.send( - self._session, self._trace_config_ctx, TraceDnsResolveHostStartParams(host) - ) - - async def send_dns_resolvehost_end(self, host: str) -> None: - return await self._trace_config.on_dns_resolvehost_end.send( - self._session, self._trace_config_ctx, TraceDnsResolveHostEndParams(host) - ) - - async def send_dns_cache_hit(self, host: str) -> None: - return await self._trace_config.on_dns_cache_hit.send( - self._session, self._trace_config_ctx, 
TraceDnsCacheHitParams(host) - ) - - async def send_dns_cache_miss(self, host: str) -> None: - return await self._trace_config.on_dns_cache_miss.send( - self._session, self._trace_config_ctx, TraceDnsCacheMissParams(host) - ) - - async def send_request_headers( - self, method: str, url: URL, headers: "CIMultiDict[str]" - ) -> None: - return await self._trace_config._on_request_headers_sent.send( - self._session, - self._trace_config_ctx, - TraceRequestHeadersSentParams(method, url, headers), - ) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/parallel_coordinates.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/parallel_coordinates.py deleted file mode 100644 index 2f977dbf543d6d0bd5fe985bf22c6441e71261d2..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/parallel_coordinates.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Parallel Coordinates Example ----------------------------- -A `Parallel Coordinates `_ -chart is a chart that lets you visualize the individual data points by drawing -a single line for each of them. -Such a chart can be created in Altair by first transforming the data into a -suitable representation. -This example shows a parallel coordinates chart with the Iris dataset. -""" -# category: other charts - -import altair as alt -from vega_datasets import data - -source = data.iris() - -alt.Chart(source).transform_window( - index='count()' -).transform_fold( - ['petalLength', 'petalWidth', 'sepalLength', 'sepalWidth'] -).mark_line().encode( - x='key:N', - y='value:Q', - color='species:N', - detail='index:N', - opacity=alt.value(0.5) -).properties(width=500) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attrs/validators.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attrs/validators.py deleted file mode 100644 index ab2c9b3024714d3b1caeb2f0773a0274dfc10f01..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attrs/validators.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: MIT - -from attr.validators import * # noqa diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py deleted file mode 100644 index 2ea37c16b4a477c48e4dd4500ec03f2d0c86d611..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import math - -from fairseq import metrics, utils -from fairseq.criterions import register_criterion - -from .label_smoothed_cross_entropy import ( - LabelSmoothedCrossEntropyCriterion, - LabelSmoothedCrossEntropyCriterionConfig, -) - -from dataclasses import dataclass, field - - -@dataclass -class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig( - LabelSmoothedCrossEntropyCriterionConfig -): - alignment_lambda: float = field( - default=0.05, metadata={"help": "weight for the alignment loss"} - ) - - -@register_criterion( - "label_smoothed_cross_entropy_with_alignment", - dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig, -) -class LabelSmoothedCrossEntropyCriterionWithAlignment( - LabelSmoothedCrossEntropyCriterion -): - def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda): - super().__init__(task, sentence_avg, label_smoothing) - self.alignment_lambda = alignment_lambda - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. - - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(**sample["net_input"]) - loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce) - sample_size = ( - sample["target"].size(0) if self.sentence_avg else sample["ntokens"] - ) - logging_output = { - "loss": utils.item(loss.data) if reduce else loss.data, - "nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["target"].size(0), - "sample_size": sample_size, - } - - alignment_loss = None - - # Compute alignment loss only for training set and non dummy batches. - if "alignments" in sample and sample["alignments"] is not None: - alignment_loss = self.compute_alignment_loss(sample, net_output) - - if alignment_loss is not None: - logging_output["alignment_loss"] = utils.item(alignment_loss.data) - loss += self.alignment_lambda * alignment_loss - - return loss, sample_size, logging_output - - def compute_alignment_loss(self, sample, net_output): - attn_prob = net_output[1]["attn"][0] - bsz, tgt_sz, src_sz = attn_prob.shape - attn = attn_prob.view(bsz * tgt_sz, src_sz) - - align = sample["alignments"] - align_weights = sample["align_weights"].float() - - if len(align) > 0: - # Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to - # the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing. 
- loss = -( - (attn[align[:, 1][:, None], align[:, 0][:, None]]).log() - * align_weights[:, None] - ).sum() - else: - return None - - return loss - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) - nll_loss_sum = utils.item( - sum(log.get("nll_loss", 0) for log in logging_outputs) - ) - alignment_loss_sum = utils.item( - sum(log.get("alignment_loss", 0) for log in logging_outputs) - ) - ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) - sample_size = utils.item( - sum(log.get("sample_size", 0) for log in logging_outputs) - ) - - metrics.log_scalar( - "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 - ) - metrics.log_scalar( - "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 - ) - metrics.log_scalar( - "alignment_loss", - alignment_loss_sum / sample_size / math.log(2), - sample_size, - round=3, - ) - metrics.log_derived( - "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return True diff --git a/spaces/aryadytm/photo-colorization/app.py b/spaces/aryadytm/photo-colorization/app.py deleted file mode 100644 index 6d9b1b5bfaefddaac0f91fa9693e06cfaf21e2bf..0000000000000000000000000000000000000000 --- a/spaces/aryadytm/photo-colorization/app.py +++ /dev/null @@ -1,158 +0,0 @@ -# Based on: https://github.com/jantic/DeOldify -import os, re, time - -os.environ["TORCH_HOME"] = os.path.join(os.getcwd(), ".cache") -os.environ["XDG_CACHE_HOME"] = os.path.join(os.getcwd(), ".cache") - -import streamlit as st -import PIL -import cv2 -import numpy as np -import uuid -from zipfile import ZipFile, ZIP_DEFLATED -from io import BytesIO -from random import randint -from datetime import datetime - -from src.deoldify import device -from src.deoldify.device_id import DeviceId -from src.deoldify.visualize import * -from src.app_utils import get_model_bin - - -device.set(device=DeviceId.CPU) - - -@st.cache(allow_output_mutation=True, show_spinner=False) -def load_model(model_dir, option): - if option.lower() == 'artistic': - model_url = 'https://data.deepai.org/deoldify/ColorizeArtistic_gen.pth' - get_model_bin(model_url, os.path.join(model_dir, "ColorizeArtistic_gen.pth")) - colorizer = get_image_colorizer(artistic=True) - elif option.lower() == 'stable': - model_url = "https://www.dropbox.com/s/usf7uifrctqw9rl/ColorizeStable_gen.pth?dl=0" - get_model_bin(model_url, os.path.join(model_dir, "ColorizeStable_gen.pth")) - colorizer = get_image_colorizer(artistic=False) - - return colorizer - - -def resize_img(input_img, max_size): - img = input_img.copy() - img_height, img_width = img.shape[0],img.shape[1] - - if max(img_height, img_width) > max_size: - if img_height > img_width: - new_width = img_width*(max_size/img_height) - new_height = max_size - resized_img = cv2.resize(img,(int(new_width), int(new_height))) - return resized_img - - elif img_height <= img_width: - new_width = img_height*(max_size/img_width) - new_height = max_size - resized_img = cv2.resize(img,(int(new_width), int(new_height))) - return resized_img - - return img - - -def colorize_image(pil_image, img_size=800) -> "PIL.Image": - # Open the image - pil_img 
= pil_image.convert("RGB") - img_rgb = np.array(pil_img) - resized_img_rgb = resize_img(img_rgb, img_size) - resized_pil_img = PIL.Image.fromarray(resized_img_rgb) - - # Send the image to the model - output_pil_img = colorizer.plot_transformed_pil_image(resized_pil_img, render_factor=35, compare=False) - - return output_pil_img - - -def image_download_button(pil_image, filename: str, fmt: str, label="Download"): - if fmt not in ["jpg", "png"]: - raise Exception(f"Unknown image format (Available: {fmt} - case sensitive)") - - pil_format = "JPEG" if fmt == "jpg" else "PNG" - file_format = "jpg" if fmt == "jpg" else "png" - mime = "image/jpeg" if fmt == "jpg" else "image/png" - - buf = BytesIO() - pil_image.save(buf, format=pil_format) - - return st.download_button( - label=label, - data=buf.getvalue(), - file_name=f'{filename}.{file_format}', - mime=mime, - ) - - -########################### -###### STREAMLIT CODE ##### -########################### - - -st_color_option = "Artistic" - -# Load models -try: - with st.spinner("Loading..."): - print('before loading the model') - colorizer = load_model('models/', st_color_option) - print('after loading the model') - -except Exception as e: - colorizer = None - print('Error while loading the model. Please refresh the page') - print(e) - st.write("**App loading error. Please try again later.**") - - - -if colorizer is not None: - st.title("AI Photo Colorization") - - st.image(open("assets/demo.jpg", "rb").read()) - - st.markdown( - """ - Colorizing black & white photo can be expensive and time consuming. We introduce AI that can colorize - grayscale photo in seconds. **Just upload your grayscale image, then click colorize.** - """ - ) - - uploaded_file = st.file_uploader("Upload photo", accept_multiple_files=False, type=["png", "jpg", "jpeg"]) - - if uploaded_file is not None: - bytes_data = uploaded_file.getvalue() - img_input = PIL.Image.open(BytesIO(bytes_data)).convert("RGB") - - with st.expander("Original photo", True): - st.image(img_input) - - if st.button("Colorize!") and uploaded_file is not None: - - with st.spinner("AI is doing the magic!"): - img_output = colorize_image(img_input) - img_output = img_output.resize(img_input.size) - - # NOTE: Calm! I'm not logging the input and outputs. - # It is impossible to access the filesystem in spaces environment. - now = datetime.now().strftime("%Y%m%d-%H%M%S-%f") - img_input.convert("RGB").save(f"./output/{now}-input.jpg") - img_output.convert("RGB").save(f"./output/{now}-output.jpg") - - st.write("AI has finished the job!") - st.image(img_output) - # reuse = st.button('Edit again (Re-use this image)', on_click=set_image, args=(inpainted_img, )) - - uploaded_name = os.path.splitext(uploaded_file.name)[0] - image_download_button( - pil_image=img_output, - filename=uploaded_name, - fmt="jpg", - label="Download Image" - ) - diff --git a/spaces/asciicorp/Legal-ai/help_text.py b/spaces/asciicorp/Legal-ai/help_text.py deleted file mode 100644 index 546387481b23b415db1baadb3162f8ec7485c83d..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/Legal-ai/help_text.py +++ /dev/null @@ -1,10 +0,0 @@ -HELP_TEXT = { - "Select a model": "Select a language model to use for generating text. The 'text-davinci-003' model is the most capable and expensive, while 'gpt-3.5-turbo-0301' is a cheaper and less capable model.", - "LLM Temperature": "Controls the randomness of the generated text. 
Higher values lead to more random text, while lower values lead to more predictable text.", - "Max Tokens": "The maximum number of tokens (words or subwords) that the language model can generate. Larger values lead to longer text.", - "Frequency Penalty": "Controls how much the language model avoids repeating the same phrases or patterns in the generated text.", - "Presence Penalty": "Controls how much the language model avoids using words or phrases that were already used in the input text or previous generations.", - "Select text splitter": "Select a text splitter to use for splitting the input text into smaller chunks. 'RecursiveCharacterTextSplitter' is the default and recommended option, while 'CharacterTextSplitter' is faster but may produce lower quality results.", - "Chunk size": "The size of each chunk of text that is generated by the language model. Larger values lead to longer chunks and fewer overall chunks.", - "Chunk overlap": "The amount of overlap between adjacent chunks of text. Larger values lead to more overlap and smoother transitions between chunks, but may also result in more repetition and less diversity in the generated text." -} diff --git a/spaces/ashercn97/AsherTesting/extensions/openai/utils.py b/spaces/ashercn97/AsherTesting/extensions/openai/utils.py deleted file mode 100644 index 0c9441a3aa4ee06bdf7b9d3ea1d54c250e080b15..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/extensions/openai/utils.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import base64 -import numpy as np - - -def float_list_to_base64(float_list): - # Convert the list to a float32 array that the OpenAPI client expects - float_array = np.array(float_list, dtype="float32") - - # Get raw bytes - bytes_array = float_array.tobytes() - - # Encode bytes into base64 - encoded_bytes = base64.b64encode(bytes_array) - - # Turn raw base64 encoded bytes into ASCII - ascii_string = encoded_bytes.decode('ascii') - return ascii_string - - -def end_line(s): - if s and s[-1] != '\n': - s = s + '\n' - return s - - -def debug_msg(*args, **kwargs): - if 'OPENEDAI_DEBUG' in os.environ: - print(*args, **kwargs) diff --git a/spaces/ashishraics/FillTheBlanks/Dockerfile b/spaces/ashishraics/FillTheBlanks/Dockerfile deleted file mode 100644 index d2b85ad19bfb43c0f46d9370f28e8c509ff29f28..0000000000000000000000000000000000000000 --- a/spaces/ashishraics/FillTheBlanks/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM python:3.9-slim-buster -COPY . 
/app -WORKDIR /app -RUN pip3 install -r requirements.txt -EXPOSE 8501 -CMD ["streamlit","run","app.py"] \ No newline at end of file diff --git a/spaces/autosummproject/autosumm/extractor/_utils.py b/spaces/autosummproject/autosumm/extractor/_utils.py deleted file mode 100644 index 390cdb91d1fba33ad95b37dd8bc11cbfda6a74fc..0000000000000000000000000000000000000000 --- a/spaces/autosummproject/autosumm/extractor/_utils.py +++ /dev/null @@ -1,121 +0,0 @@ -import nmslib -import numpy as np -import streamlit as st -# import inflect -import torch -from os import environ - -# p = inflect.engine() - -class FewDocumentsError(Exception): - def __init__(self, documents, size, msg): - self.documents = documents - self.size = size - self.msg = msg - - def __str__(self): - return repr(self.msg) - -def document_extraction(dataset, query, keywords, min_document_size, min_just_one_paragraph_size): - # TODO: compare inflected forms - # word_in_text = lambda word, text: any([p.compare(word, w) for w in text.split()]) - word_in_text = lambda word, text: word in set(text.split()) - lower_dataset = [document.lower() for document in dataset] - lower_query = query.lower() - lower_keywords = [keyword.lower() for keyword in keywords] - - if environ['PORTUGUESE'] == 'true': - portuguese = True - elif environ['PORTUGUESE'] == 'false': - portuguese = False - else: - raise EnvironmentError - - documents = {} - - documents['QUERY'] = [ - dataset[lower_dataset.index(document)] for document in lower_dataset - if (word_in_text(lower_query, document)) - and (len(document.split()) > min_document_size) - and any(len(paragraph.split()) > min_just_one_paragraph_size for paragraph in document.splitlines()) - ] - - documents['AND'] = [ - dataset[lower_dataset.index(document)] for document in lower_dataset - if all(word_in_text(keyword, document) for keyword in lower_keywords) - and (len(document.split()) > min_document_size) - and any(len(paragraph.split()) > min_just_one_paragraph_size for paragraph in document.splitlines()) - ] - - documents['OR'] = [ - dataset[lower_dataset.index(document)] for document in lower_dataset - if any(word_in_text(keyword, document) for keyword in lower_keywords) - and (len(document.split()) > min_document_size) - and any(len(paragraph.split()) > min_just_one_paragraph_size for paragraph in document.splitlines()) - ] - - empty = { - 'QUERY': len(documents['QUERY']) == 0, - 'AND': len(documents['AND']) == 0, - 'OR': len(documents['OR']) == 0 - } - - sizes = { - 'QUERY': len(documents['QUERY']), - 'AND': len(documents['AND']), - 'OR': len(documents['OR']) - } - - if all(empty.values()): - # TODO: throw error - st.info(empty.values()) - if portuguese: - st.warning(f'Nenhum documento encontrado para a query "{query}", por favor, tente com outra query') - else: - st.warning(f'No document found for the query "{query}", please try with another query') - st.stop() - - if sizes['QUERY'] >= 10: - extracted_documents = documents['QUERY'] - elif sizes['AND'] >= 10: - extracted_documents = documents['AND'] - elif sizes['OR'] >= 10: - extracted_documents = documents['OR'] - else: - number_of_documents = sizes['OR'] - if portuguese: - raise FewDocumentsError(documents['OR'], number_of_documents, - f'Somente {number_of_documents} documentos encontrados para a query "{query}".\ - Por favor selecione "Prosseguir" para prosseguir com {number_of_documents} documentos ou tente novamente com outra query' - ) - else: - raise FewDocumentsError(documents['OR'], number_of_documents, - f'Only {number_of_documents} documents 
found for the query "{query}".\ - Please select "Proceed" to proceed with {number_of_documents} documents or try again with another query' - ) - - return extracted_documents, empty, sizes - -def paragraph_extraction(documents, min_paragraph_size): - paragraphs = [ - documents[i].splitlines()[j] for i in range(len(documents)) for j in range(len(documents[i].splitlines())) - if (len(documents[i].splitlines()[j].split()) > min_paragraph_size) - ] - - return paragraphs - -def semantic_search(model, query, files, number_of_similar_files): - encoded_query = model.encode(query) - encoded_files = model.encode(files) - - model_index = nmslib.init(method='hnsw', space='angulardist') - model_index.addDataPointBatch(encoded_files) - model_index.createIndex({'post': 2}) - - ids, distances = model_index.knnQuery(encoded_query, k=number_of_similar_files) - - selected_files = [files[index] for index in ids] - - distances = 180*distances/np.pi - - return selected_files, distances; \ No newline at end of file diff --git a/spaces/avivdm1/AutoGPT/autogpt/agent/agent_manager.py b/spaces/avivdm1/AutoGPT/autogpt/agent/agent_manager.py deleted file mode 100644 index 898767a485e50b5e62625a7883edf1b30d5fddf9..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/autogpt/agent/agent_manager.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Agent manager for managing GPT agents""" -from __future__ import annotations - -from typing import Union - -from autogpt.config.config import Singleton -from autogpt.llm_utils import create_chat_completion - - -class AgentManager(metaclass=Singleton): - """Agent manager for managing GPT agents""" - - def __init__(self): - self.next_key = 0 - self.agents = {} # key, (task, full_message_history, model) - - # Create new GPT agent - # TODO: Centralise use of create_chat_completion() to globally enforce token limit - - def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]: - """Create a new agent and return its key - - Args: - task: The task to perform - prompt: The prompt to use - model: The model to use - - Returns: - The key of the new agent - """ - messages = [ - {"role": "user", "content": prompt}, - ] - - # Start GPT instance - agent_reply = create_chat_completion( - model=model, - messages=messages, - ) - - # Update full message history - messages.append({"role": "assistant", "content": agent_reply}) - - key = self.next_key - # This is done instead of len(agents) to make keys unique even if agents - # are deleted - self.next_key += 1 - - self.agents[key] = (task, messages, model) - - return key, agent_reply - - def message_agent(self, key: str | int, message: str) -> str: - """Send a message to an agent and return its response - - Args: - key: The key of the agent to message - message: The message to send to the agent - - Returns: - The agent's response - """ - task, messages, model = self.agents[int(key)] - - # Add user message to message history before sending to agent - messages.append({"role": "user", "content": message}) - - # Start GPT instance - agent_reply = create_chat_completion( - model=model, - messages=messages, - ) - - # Update full message history - messages.append({"role": "assistant", "content": agent_reply}) - - return agent_reply - - def list_agents(self) -> list[tuple[str | int, str]]: - """Return a list of all agents - - Returns: - A list of tuples of the form (key, task) - """ - - # Return a list of agent keys and their tasks - return [(key, task) for key, (task, _, _) in self.agents.items()] - - def delete_agent(self, key: 
Union[str, int]) -> bool: - """Delete an agent from the agent manager - - Args: - key: The key of the agent to delete - - Returns: - True if successful, False otherwise - """ - - try: - del self.agents[int(key)] - return True - except KeyError: - return False diff --git a/spaces/awacke1/AI-ChatGPT-Provider/app.py b/spaces/awacke1/AI-ChatGPT-Provider/app.py deleted file mode 100644 index a824a3f221406c91938ab04e2822230a1fafea2d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AI-ChatGPT-Provider/app.py +++ /dev/null @@ -1,634 +0,0 @@ -import streamlit as st - -st.set_page_config(layout="wide") - -st.markdown(""" -📊Costly Condition 📑EDI 278A Request 📨ADT Message Type 🏥ADT Event 📄Clinical Document Example - -🩸Diabetes: “Diabetes Mellitus”💉AND (“Patient Admission” OR “Patient Discharge”)👩‍⚕️ADT^A01 💊PATIENT ADMIT/DISCHARGE 📝Discharge Summary - -❤️Heart Disease: “Heart Diseases”❤️AND (“Patient Admission” OR “Patient Discharge”)👩‍⚕️ADT^A01 💊PATIENT ADMIT/DISCHARGE 📝Discharge Summary - -😔Anxiety & Depression: “Depression” or “Anxiety”😢AND (“Patient Admission” OR “Patient Discharge”)👩‍⚕️ADT^A01💊PATIENT ADMIT/DISCHARGE 📝Discharge Summary - -🦴Musculoskeletal Disorders: “Musculoskeletal Diseases”🦴AND (“Patient Admission” OR “Patient Discharge”)👩‍⚕️ADT^A01 💊PATIENT ADMIT/DISCHARGE 📝Discharge Summary - -🤔What are MeSH terms❓ MeSH terms help sort articles in PubMed by topic. Here are the MeSH terms used for each costly condition: - -1. 🩸Diabetes: “Diabetes Mellitus”💉- high blood sugar levels disease - -2. ❤️Heart Disease: “Heart Diseases”❤️- troubles in the heart - -3. 😔Anxiety & Depression: “Depression” or “Anxiety”😢- feelings of sadness, worry and fear - -4. 🦴Musculoskeletal Disorders: “Musculoskeletal Diseases”🦴- problems in bones, muscles, and joints. -Costly Condition EDI 278A Request ADT Message Type ADT Event Clinical Document Example -Diabetes “Diabetes Mellitus”[MeSH Terms] AND (“Patient Admission”[Mesh] OR “Patient Discharge”[Mesh]) ADT^A01 PATIENT ADMIT/DISCHARGE Discharge Summary -Heart Disease “Heart Diseases”[MeSH Terms] AND (“Patient Admission”[Mesh] OR “Patient Discharge”[Mesh]) ADT^A01 PATIENT ADMIT/DISCHARGE Discharge Summary -Anxiety & Depression “Depression”[MeSH Terms] OR “Anxiety”[MeSH Terms] AND (“Patient Admission”[Mesh] OR “Patient Discharge”[Mesh]) ADT^A01 PATIENT ADMIT/DISCHARGE Discharge Summary -Musculoskeletal Disorders “Musculoskeletal Diseases”[MeSH Terms] AND (“Patient Admission”[Mesh] OR “Patient Discharge”[Mesh]) ADT^A01 PATIENT ADMIT/DISCHARGE Discharge Summary -explain the mesh terms for each -- MeSH terms are a way of categorizing and organizing articles in PubMed based on their subject matter. The MeSH terms used for each costly condition in the table are as follows: - -1. Diabetes: “Diabetes Mellitus” is a MeSH term used to describe a group of metabolic diseases characterized by high blood sugar levels. - -2. Heart Disease: “Heart Diseases” is a MeSH term used to describe a variety of conditions that affect the heart. - -3. Anxiety & Depression: “Depression” and “Anxiety” are separate MeSH terms used to describe mood disorders characterized by persistent feelings of sadness and/or anxiety. - -4. Musculoskeletal Disorders: “Musculoskeletal Diseases” is a MeSH term used to describe a variety of conditions that affect the bones, muscles, and joints. 
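To make these MeSH queries concrete, here is a minimal sketch of how one of them could be run against PubMed through NCBI's public E-utilities `esearch` endpoint. The query string is taken from the Diabetes row of the table above; the variable names and the `retmax` value are illustrative assumptions, not part of the original workflow.

```python
# Minimal sketch: counting PubMed records for one of the MeSH queries above.
# The endpoint and parameter names follow NCBI E-utilities conventions; the
# query string is the Diabetes row from the table, everything else is illustrative.
import requests

ESEARCH_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi"

query = (
    '"Diabetes Mellitus"[MeSH Terms] '
    'AND ("Patient Admission"[Mesh] OR "Patient Discharge"[Mesh])'
)

resp = requests.get(
    ESEARCH_URL,
    params={"db": "pubmed", "term": query, "retmode": "json", "retmax": 5},
    timeout=30,
)
result = resp.json()["esearchresult"]
print("Matching articles:", result["count"])
print("First PMIDs:", result["idlist"])
```

Swapping in the query strings from the other rows would give the corresponding counts for heart disease, anxiety and depression, and musculoskeletal disorders.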
- -Costly Condition EDI 278A Request ADT Message Type ADT Event Clinical Document Example -Diabetes -Heart Disease -Anxiety & Depression -Musculoskeletal Disorders - -ISA*00* *00* *ZZ*SENDERID *ZZ*RECEIVERID *220505*1200*^*00501*000000001*0*P*:~ -GS*HI*SENDERID*RECEIVERID*20220505*1200*1*X*005010X217~ -ST*278*0001~ -BHT*0078*11*10001234*20220505*1200*16~ -HL*1**20*1~ -NM1*X3*2*PAYER NAME*****PI*PAYERID~ -HL*2*1*21*1~ -NM1*1P*2*PROVIDER NAME*****XX*PROVIDER NPI~ -HL*3*2*22*1~ -NM1*IL*1*LAST NAME*FIRST NAME****MI*MEMBERID~ -TRN*1*REFNUM*ABCDEFGHIJ~ -UM*HS*I*1~ -HCR*A3*APPROVAL NUMBER~ -DTP*472*D8*20220506~ -HI*ABK:DIAGNOSIS CODE~ -MSG*Prior authorization approved for the requested service.~ -SE*13*0001~ -GE*1*1~ -IEA*1*000000001~ - -Description of each part: - -ISA: Interchange Control Header segment, contains sender and receiver information. -GS: Functional Group Header segment, contains functional group information. -ST: Transaction Set Header segment, marks the start of the transaction set. -BHT: Beginning of Hierarchical Transaction segment, contains transaction set information. -HL: Hierarchical Level segment, used to define hierarchical structure. -NM1: Name segment, used to identify entities such as payer, provider, and patient. -TRN: Trace segment, contains reference number for the transaction. -UM: Health Care Services Review segment, contains review information. -HCR: Health Care Services Review segment, contains review decision information. -DTP: Date/Time Qualifier segment, contains relevant dates. -HI: Health Care Information Codes segment, contains diagnosis codes. -MSG: Message Text segment, contains human-readable messages. -SE: Transaction Set Trailer segment, marks the end of the transaction set. -GE: Functional Group Trailer segment, marks the end of the functional group. -IEA: Interchange Control Trailer segment, marks the end of the interchange. -For the four high-cost service packages (Diabetes, Heart Disease, Anxiety & Depression, Musculoskeletal Disorders), the diagnosis list and clinical evidence for prior authorization would be included in the HI and MSG segments, respectively. The diagnosis codes would be specified in the HI segment, while the clinical evidence and any additional information would be provided in the MSG segment. 
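As a rough illustration of how the segment layout described above can be inspected in code, the sketch below splits an abridged copy of the 278 example into segments and pulls out the HCR decision, the HI diagnosis code, and the MSG note. It assumes the delimiters used in the example (`~` ends a segment, `*` separates elements); the variable names are made up for this sketch and do not come from any EDI library.

```python
# Minimal parsing sketch for the 278 example above.
# Assumes '~' terminates each segment and '*' separates elements, as in the sample;
# the abridged payload below is copied from the example response.
edi_278_sample = (
    "ST*278*0001~"
    "HCR*A3*APPROVAL NUMBER~"
    "DTP*472*D8*20220506~"
    "HI*ABK:DIAGNOSIS CODE~"
    "MSG*Prior authorization approved for the requested service.~"
    "SE*13*0001~"
)

segments = [seg for seg in edi_278_sample.split("~") if seg]
for segment in segments:
    elements = segment.split("*")
    segment_id = elements[0]
    if segment_id == "HCR":
        # review decision code and certification/approval reference
        print("Review decision code:", elements[1], "| reference:", elements[2])
    elif segment_id == "HI":
        # diagnosis element attached to the request
        print("Diagnosis element:", elements[1])
    elif segment_id == "MSG":
        # free-text clinical note / supporting evidence
        print("Message text:", elements[1])
```

In a real parser the separators would be read from the ISA header rather than hard-coded, but the hard-coded form keeps the sketch short.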
- - - - -# In Context Learning:| Costly Condition | EDI 278A Request | ADT Message Type | ADT Event | Clinical Document Example | -|---------------------------|-----------------------------------|------------------|-----------|----------------------------------------------| -| Diabetes | REF|1234567890|DOE^JOHN||20230505120000|19730101|M|123 Main St^^Anytown^NC^12345^^^||(555)555-1234|(555)555-5678||123456789|999-99-9999|||||||||| | ADT^A01 | A01 | Admitting a patient to the hospital for diabetes management | -| Heart Disease | RQ|111111|20181214|20181220|Acme Health Plan|1234 1st Street|Anytown|NC|12345| | | | | | | | ADT^A01 | A01 | Admitting a patient to the hospital for heart surgery | -| Anxiety & Depression | REF|1234567890|DOE^JOHN||20230505120000|19730101|M|123 Main St^^Anytown^NC^12345^^^||(555)555-1234|(555)555-5678||123456789|999-99-9999|||||||||| | ADT^A01 - - - - -| Costly Condition | EDI 278A Request | ADT Message Type | ADT Event | Clinical Document Example | -|---------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------| -| Diabetes | | | | -| | | | | -| Heart Disease | | | | -| | | | | -| Anxiety & Depression | | | | -| | | | | -| Musculoskeletal Disorders | | | | -| | | | | -| Patient Information Update | MSH|^~\&|SendingApp|SendingFac|ReceivingApp|ReceivingFac|20230505120000||ADT^A08^ADT_A01|1234567|P|2.3|||||||||EVN|A08|20230505120000||||PID|1||123456789^^^MRN^MR||DOE^JOHN||20230505120000|M||||||||||||||||||PV1|1||||||||||||||||||| | -| | PRD|FR|SendingApp|SendingFac||||||| | ADT^A08 | A08 | Updating patient's demographic information | -| Diabetes | 0028|030| |Initial Claim Request | -| | CLM|123456789012345|10000| |17|01|Y| |7|Y|Y|Y|Y| | | |20120328| | | |0000000| |0000000|0000000| | -| Heart Disease | 0081|029| |Treatment Plan Request | -| | RQ|111111|20181214|20181220|Acme Health Plan|1234 1st Street|Anytown|NC|12345| | | | | | | | -| Anxiety & Depression | 0160|017| |Referral Request | -| | REF|1234567890|DOE^JOHN||20220505120000|19730101|M|123 Main St^^Anytown^NC^12345^^^||(555)555-1234|(555)555-5678||123456789|999-99-9999|||||||||| | | ADT^A01 | A01 | Admitting a patient to the hospital for a planned procedure | -| Musculoskeletal Disorders | 0450|012| |Authorization Request | -| | PRV|PC|1234 1st Street|Anytown|NC|12345|US|5555555555| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | - - -## Diabetes Clinical Document (for Teaching Purposes only) - -Patient: John Doe -DOB: 01/01/1960 -Diagnosis: Diabetes Mellitus Type 2 (ICD-10 E11.9) -Medications: Metformin 1000mg PO BID, Insulin glargine 20 units subcutaneous daily - -Assessment: -Mr. Doe's blood glucose levels have been well controlled with his current medication regimen. His most recent A1c was 7.0%. He reports occasional mild hypoglycemia. - -Plan: -Continue current medication regimen. Educate patient on the signs and symptoms of hypoglycemia and how to treat it. 
Schedule follow-up appointment in 3 months. - -Signed: Dr. Jane Smith - - -### EDI 278A Request Example - -| Field | Value | -|-------|-------| -| Transaction Code | 278 | -| Transaction Type | Request | -| Service Type Code | HST | -| Service Type | Healthcare Services | -| Sender ID | 123456 | -| Receiver ID | 987654 | -| Patient ID | 123-45-6789 | -| Patient Name | Doe, John | -| Diagnosis Code | E11.9 | -| Diagnosis Description | Type 2 diabetes mellitus without complications | -| Service Start Date | 2023-05-01 | -| Service End Date | 2023-05-01 | -| CPT Code | 99213 | -| CPT Description | Office/outpatient visit, established patient | -| CPT Modifier | | -| CPT Quantity | 1 | -| CPT Unit Price | 100.00 | -| CPT Total Price | 100.00 | -| ICD Code | E11.9 | -| ICD Description | Type 2 diabetes mellitus without complications | -| Service Line Control Number | 1 | - - - - -## Heart Disease Clinical Document - -Patient: Jane Smith -DOB: 05/01/1955 -Diagnosis: Myocardial Infarction (ICD-10 I21.3) -Medications: Aspirin 81mg PO daily, Metoprolol succinate 25mg PO daily, Atorvastatin 40mg PO daily - -Assessment: -Ms. Smith's cardiac function has improved since her myocardial infarction. She has had no chest pain or shortness of breath. Her most recent EKG showed no evidence of ischemia. - -Plan: -Continue current medication regimen. Schedule follow-up appointment in 6 months. - -Signed: Dr. John Doe - -### EDI 278A Request Example - -| Field | Value | -|-------|-------| -| Transaction Code | 278 | -| Transaction Type | Request | -| Service Type Code | HST | -| Service Type | Healthcare Services | -| Sender ID | 123456 | -| Receiver ID | 987654 | -| Patient ID | 987-65-4321 | -| Patient Name | Smith, Jane | -| Diagnosis Code | I21.3 | -| Diagnosis Description | ST elevation (STEMI) myocardial infarction of unspecified site | -| Service Start Date | 2023-05-01 | -| Service End Date | 2023-05-01 | -| CPT Code | 99214 | -| CPT Description | Office/outpatient visit, established patient | -| CPT Modifier | | -| CPT Quantity | 1 | -| CPT Unit Price | 150.00 | -| CPT Total Price | 150.00 | -| ICD Code | I21.3 | -| ICD Description | ST elevation (STEMI) myocardial inf - -## Anxiety & Depression Clinical Document - -Patient: Alice Johnson -DOB: 06/15/1980 -Diagnosis: Major Depressive Disorder (ICD-10 F32.1) -Medications: Sertraline 50mg PO daily - -Assessment: -Ms. Johnson reports feeling sad and unmotivated. She has trouble sleeping and has lost interest in activities she used to enjoy. She denies any suicidal ideation. - -Plan: -Increase sertraline to 100mg PO daily. Schedule follow-up appointment in 2 weeks. - -Signed: Dr. 
Sarah Lee - -### EDI 278A Request Example - -| Field | Value | -|-------|-------| -| Transaction Code | 278 | -| Transaction Type | Request | -| Service Type Code | HST | -| Service Type | Healthcare Services | -| Sender ID | 123456 | -| Receiver ID | 987654 | -| Patient ID | 456-78-9123 | -| Patient Name | Johnson, Alice | -| Diagnosis Code | F32.1 | -| Diagnosis Description | Major depressive disorder, single episode, moderate | -| Service Start Date | 2023-05-01 | -| Service End Date | 2023-05-01 | -| CPT Code | 90834 | -| CPT Description | Psychotherapy, 45 minutes with patient | -| CPT Modifier | | -| CPT Quantity | 1 | -| CPT Unit Price | 200.00 | -| CPT Total Price | 200.00 | -| ICD Code | F32.1 | -| ICD Description | Major depressive disorder, single episode, moderate | -| Service Line Control Number | 1 | - - -### Musculoskeletal Disorders Clinical Document - -Patient: Mark Thompson -DOB: 02/25/1972 -Diagnosis: Low Back Pain (ICD-10 M54.5) -Medications: Ibuprofen 400mg PO TID - -Assessment: -Mr. Thompson reports chronic low back pain that has been getting worse over the past several months. Physical exam reveals tenderness and limited range of motion in the lumbar spine. - -Plan: -Prescribe physical therapy with focus on core strengthening exercises. Schedule follow-up appointment in 4 weeks. - -Signed: Dr. James Chen - - -| Field | Value | -|-------|-------| -| Transaction Code | 278 | -| Transaction Type | Request | -| Service Type Code | HST | -| Service Type | Healthcare Services | -| Sender ID | 123456 | -| Receiver ID | 987654 | -| Patient ID | 789-01-2345 | -| Patient Name | Thompson, Mark | -| Diagnosis Code | M54.5 | -| Diagnosis Description | Low back pain | -| Service Start Date | 2023-05-01 | -| Service End Date | 2023-05-01 | -| CPT Code | 97110 | -| CPT Description | Therapeutic exercises | -| CPT Modifier | | -| CPT Quantity | 1 | -| CPT Unit Price | 150.00 | -| CPT Total Price | 150.00 | -| ICD Code | M54.5 | -| ICD Description | Low back pain | -| Service Line Control Number | 1 | - - -ADT A01 Admit Transaction Example - -| Field | Value | -|-------|-------| -| MSH | \|HL7\|2.6\|ADT_A01_MESSAGE\|RECEIVER_APPLICATION\|20230505120000\|SECURITY\|ADT_A01\|MSG00001\|P\|2.6\ - - - - -## Big Four - Diabetes, Heart Disease, Anxiety & Depression and Muskoskeletal Disorders - -| Costly Condition | Code Type | Code Value | Emoji | Code Description | -|---------------------------------|-----------|----------------------|-----------|---------------------------------------------------------| -| Diabetes | CPT | 99214, 99215 | 🩺 | Office/outpatient visit | -| | ICD-10 | E08.00 - E13.9 | 🍬 | Diabetes Mellitus | -| | SNOMED | 73211009, 44054006 | 🍬 | Diabetes Mellitus Type 1, Diabetes Mellitus Type 2 | -| | LOINC | 4548-4, 14647-2 | 🍬 | Hemoglobin A1c, Glucose | -| | Omaha | 1405-8, 1435-4 | 🍬 | Diabetes Management, Glucose Monitoring | -| Heart Disease | CPT | 92920, 92928 | 💔 | Percutaneous coronary intervention | -| | ICD-10 | I20.0 - I25.9 | 💔 | Ischemic Heart Diseases | -| | SNOMED | 53741008, 84114007 | 💔 | Coronary Artery Disease, Myocardial Infarction | -| | LOINC | 24331-1, 6768-6 | 💔 | Troponin I, Total Cholesterol | -| | Omaha | 2610-5, 2871-4 | 💔 | Hypertension Management, Lipid Management | -| Anxiety & Depression | CPT | 90791, 90834, 90837 | 😨 | Psychiatric diagnostic evaluation, psychotherapy | -| | ICD-10 | F32.0 - F39 | 😨 | Mood (Affective) Disorders | -| | SNOMED | 42343007, 39607008 | 😨 | Major Depressive Disorder, Generalized Anxiety Disorder | -| | LOINC | 
75216-8, 73633-2 | 😨 | PHQ-9 Total Score, GAD-7 Total Score | -| | Omaha | 5250-7, 5361-7 | 😨 | Depression Management, Anxiety Management | -| Musculoskeletal Disorders | CPT | 97110, 97112 | 🏋️‍♂️ | Therapeutic exercises, neuromuscular reeducation | -| | ICD-10 | M00.00 - M99.9 | 🏋️‍♂️ | Diseases of the musculoskeletal system and connective tissue | -| | SNOMED | 239873007, 80931005 | 🏋️‍♂️ | Osteoarthritis, Low Back Pain | -| | LOINC | 8302-2, 71425-3 | 🏋️‍♂️ | Creatine Kinase, Aldolase | -| | Omaha | 3110-2, 3120-6 | 🏋️‍♂️ | Musculoskeletal Assessment, Pain Management | - - -| Rank | Costly Condition | Approval Code Type | Approval Codes | Code Description | Medical Necessity Rules | -|------|---------------------------------|--------------------|-------------------------|----------------------------------------|--------------------------------------------------------------------------------------------------------------------------| -| 1 | 🍬 Diabetes | CPT | 99214, 99215 | Office/outpatient visit | - Diagnosis confirmation | -| | | ICD-10 | E08.00 - E13.9 | Diabetes Mellitus | - Documented treatment plan | -| | | | | | - Medication adherence | -| | | | | | - Glucose monitoring | -| | | | | | - Physician referral | -| 2 | 💔 Heart Disease | CPT | 92920, 92928 | Percutaneous coronary intervention | - Diagnosis confirmation | -| | | ICD-10 | I20.0 - I25.9 | Ischemic Heart Diseases | - Documented treatment plan | -| | | | | | - Conservative treatment history | -| | | | | | - Cardiac risk factors | -| | | | | | - Physician referral | -| 3 | 😨 Anxiety & Depression | CPT | 90791, 90834, 90837 | Psychiatric diagnostic evaluation, psychotherapy | - Diagnosis confirmation | -| | | ICD-10 | F32.0 - F39 | Mood (Affective) Disorders | - Documented treatment plan | -| | | | | | - Severity assessment | -| | | | | | - Conservative treatment history | -| | | | | | - Physician referral | -| 4 | 🏋️‍♂️ Musculoskeletal Disorders | CPT | 97110, 97112 | Therapeutic exercises, neuromuscular reeducation | - Diagnosis confirmation | -| | | ICD-10 | M00.00 - M99.9 | Diseases of the musculoskeletal system and connective tissue | - Documented treatment plan | -| | | | | | - Conservative treatment history | -| | | | | | - Functional limitation | -| | | | | | - Physician referral | -| 5 | 🦷 Dental Issues | CPT | 00100, 00170 | Anesthesia for intraoral procedures, incision and drainage of abscess | - Diagnosis confirmation | -| | | ICD-10 | K00.0 - K14.9 | Diseases of oral cavity and salivary glands | - Documented treatment plan | -| | | | | | - Conservative treatment history | -| | | | | | - Dentist referral | -| 6 | 🌊 Chronic Kidney Disease | CPT | 90935, 90937 | Hemodialysis, outpatient | - Diagnosis confirmation | -| | | ICD-10 | N18.1 - N18.9 | Chronic kidney disease | - Documented treatment plan | -| | | | | | - Stage of kidney disease | -| | | | | | - Conservative treatment history | -| | | | | | - Physician referral | - - -## Costly Top Six with Code Description and Medical Necessity Rules - -| Rank | Costly Condition | Approval Code Type | Approval Codes | Code Description | Medical Necessity Rules | -|------|---------------------------------|--------------------|-------------------------|----------------------------------------|--------------------------------------------------------------------------------------------------------------------------| -| 1 | 🍬 Diabetes | CPT | 99214, 99215 | Office/outpatient visit | Diagnosis confirmation, documented treatment plan, medication adherence, glucose monitoring, 
physician referral | -| | | ICD-10 | E08.00 - E13.9 | Diabetes Mellitus | | -| 2 | 💔 Heart Disease | CPT | 92920, 92928 | Percutaneous coronary intervention | Diagnosis confirmation, documented treatment plan, conservative treatment history, cardiac risk factors, physician referral | -| | | ICD-10 | I20.0 - I25.9 | Ischemic Heart Diseases | | -| 3 | 😨 Anxiety & Depression | CPT | 90791, 90834, 90837 | Psychiatric diagnostic evaluation, psychotherapy | Diagnosis confirmation, documented treatment plan, severity assessment, conservative treatment history, physician referral | -| | | ICD-10 | F32.0 - F39 | Mood (Affective) Disorders | | -| 4 | 🏋️‍♂️ Musculoskeletal Disorders | CPT | 97110, 97112 | Therapeutic exercises, neuromuscular reeducation | Diagnosis confirmation, documented treatment plan, conservative treatment history, functional limitation, physician referral | -| | | ICD-10 | M00.00 - M99.9 | Diseases of the musculoskeletal system and connective tissue | | -| 5 | 🦷 Dental Issues | CPT | 00100, 00170 | Anesthesia for intraoral procedures, incision and drainage of abscess | Diagnosis confirmation, documented treatment plan, conservative treatment history, dentist referral | -| | | ICD-10 | K00.0 - K14.9 | Diseases of oral cavity and salivary glands | | -| 6 | 🌊 Chronic Kidney Disease | CPT | 90935, 90937 | Hemodialysis, outpatient | Diagnosis confirmation, documented treatment plan, stage of kidney disease, conservative treatment history, physician referral | -| | | ICD-10 | N18.1 - N18.9 | Chronic kidney disease | | - - -## Costly Top Six: - -| Rank | Costly Condition | 💰 Spending (billions) | CPT Range Start | CPT Range Finish | ICD-10 Diagnosis Codes | -|------|---------------------------------|------------------------|-----------------|------------------|------------------------| -| 1 | 🍬 Diabetes | 327 | 48100 | 48999 | E08.00 - E13.9 | -| 2 | 💔 Heart Disease | 230 | 92920 | 93799 | I20.0 - I25.9 | -| 3 | 😨 Anxiety & Depression | 210 | 90791 | 90899 | F32.0 - F39 | -| 4 | 🏋️‍♂️ Musculoskeletal Disorders | 176 | 97110 | 97799 | M00.00 - M99.9 | -| 5 | 🦷 Dental Issues | 130 | 00100 | 00192 | K00.0 - K14.9 | -| 6 | 🌊 Chronic Kidney Disease | 110 | 50010 | 50999 | N18.1 - N18.9 | - - -## Service Types and Associated Evidence of Med Nec - -| No. 
| Service Type | CPT Code Range | Rules for Required Evidence of Medical Necessity | -|-----|-----------------------------|----------------------|--------------------------------------------------------| -| 1 | 🫀 Organ Transplant | 50300-50380 | Diagnosis📄, waiting list📃, physician referral👩‍⚕️ | -| 2 | 🦴 Spinal Fusion Surgery | 22532-22812 | Diagnosis📄, conservative treatment history📚, physician referral👩‍⚕️ | -| 3 | 🍔 Bariatric Surgery | 43644-43775 | BMI🏋️, documented weight loss attempts📉, physician referral👩‍⚕️, psychological evaluation🧠 | -| 4 | 🦵 Joint Replacement Surgery | 27130-27447 | Diagnosis📄, conservative treatment history📚, physician referral👩‍⚕️ | -| 5 | 💉 Chemotherapy | 96401-96549 | Cancer diagnosis🦠, treatment plan💊, medication💊, dosage💊, frequency💊 | -| 6 | ☢️ Radiation Therapy | 77261-77799 | Cancer diagnosis🦠, treatment plan💊, physician referral👩‍⚕️ | -| 7 | ❤️ Cardiac Surgery | 33010-33999 | Diagnosis📄, conservative treatment history📚, physician referral👩‍⚕️ | -| 8 | 🧊 Dialysis | 90935-90999 | Diagnosis of kidney disease🩸, treatment plan💊, physician referral👩‍⚕️ | -| 9 | 🫁 Gastrointestinal Surgery | 43620-44979 | Diagnosis📄, conservative treatment history📚, physician referral👩‍⚕️ | -| 10 | 🖼️ Advanced Imaging Services | 70450-72159 (CT), 70540-72198 (MRI) | Clinical history📚, prior relevant imaging📸, symptoms justification😷 | -| 11 | 🎯 Interventional Radiology | 37220-37235 | Diagnosis📄, conservative treatment history📚, physician referral👩‍⚕️ | -| 12 | 🛌 Sleep Study | 95800-95811 | Documented sleep disorder symptoms😴, sleep diary📘, physician referral👩‍⚕️ | -| 13 | 💉 Infusion Therapy | 96360-96549 | Diagnosis📄, medication💊, dosage💊, frequency💊, duration⏳ | -| 14 | 💊 Pain Management | 64400-64530 | Diagnosis📄, conservative treatment history📚, treatment plan💊 | -| 15 | ❤️ Cardiac Stress Test | 93015-93018 | Documented symptoms😷, cardiac risk factors❤️, physician referral👩‍⚕️ | -| 16 | 🫁 Pulmonary Function Test | 94010-94799 | Documented respiratory issues😷, physician referral👩‍⚕️ | -| 17 | 🏃‍♂️ Physical Therapy | 97110-97546 | Diagnosis📄, treatment plan💊, physician referral👩‍⚕️ | -| 18 | 🧠 Mental Health Services | 90791-90899 | Diagnosis📄, treatment plan💊, physician referral👩‍⚕️ | -| 19 | 👓 Vision Services | 92002-92499 | Diagnosis📄, conservative treatment history📚, physician referral👩‍⚕️ | -| 20 | 👂 Hearing Services | 92502-92700 | Diagnosis📄, conservative treatment history📚, physician referral👩‍⚕️ | - -## Services Descending by Costly with CPT and ICD10 code ranges - -| Rank | Costly Condition | 💰 Spending (billions) | CPT Range Start | CPT Range Finish | ICD-10 Diagnosis Codes | -|------|-----------------------------|------------------------|-----------------|------------------|------------------------| -| 1 | 🍬 Diabetes | 327 | 48100 | 48999 | E08.00 - E13.9 | -| 2 | 💔 Heart Disease | 230 | 92920 | 93799 | I20.0 - I25.9 | -| 3 | 😨 Anxiety & Depression | 210 | 90791 | 90899 | F32.0 - F39 | -| 4 | 🏋️‍♂️ Musculoskeletal Disorders | 176 | 97110 | 97799 | M00.00 - M99.9 | -| 5 | 🦷 Dental Issues | 130 | 00100 | 00192 | K00.0 - K14.9 | -| 6 | 🌊 Chronic Kidney Disease | 110 | 50010 | 50999 | N18.1 - N18.9 | -| 7 | 😷 Chronic Obstructive Pulmonary Disease | 70 | 94002 | 94799 | J44.0 - J44.9 | -| 8 | 🍺 Liver Disease | 40 | 47000 | 47999 | K70.0 - K77.9 | -| 9 | 🤧 Allergies | 25 | 31231 | 31294 | J30.0 - J39.9 | -| 10 | 🔥 Gastroesophageal Reflux Disease | 17 | 43200 | 43289 | K21.0 - K21.9 | -| 11 | 🎗️ Endometriosis | 22 | 56405 | 58999 | N80.0 - N80.9 | -| 12 | 🚽 Inflammatory 
Bowel Disease | 14.6 | 44140 | 44238 | K50.00 - K52.9 | -| 13 | 📢 Hearing Loss | 7.1 | 92502 | 92700 | H90.0 - H94.9 | -| 14 | 👓 Cataracts | 10.7 | 92002 | 92499 | H25.0 - H28.9 | -| 15 | 🦠 Hypothyroidism | 3.1 | 60210 | 60271 | E00.0 - E03.9 | -| 16 | 🩸 Anemia | 5.6 | 38100 | 38199 | D50.0 - D64.9 | -| 17 | 😰 Adrenal Disorders | 1 | 60500 | 60699 | E27.0 - E27.9 | -| 18 | 🌞 Skin Cancer | 8.1 | 96910 | 96999 | C43.0 - C44.9 | -| 19 | 💧 Urinary Incontinence | 8 | 51700 | 51798 | N39.3 - N39.4 | -| 20 | 🤕 Peripheral Neuropathy | 19 | 95900 | 96004 | G60.0 - G65.9 | -| 21 | 🍼 Asthma | 6 | 94010 | 94799 | J45.0 - J45.9 | -| 22 | 🦠 Infections | 15 | 10060 | 17999 | A00.0 - B99.9 | -| 23 | 🧠 Neurological Disorders | 12 | 95805 | 95872 | G00.0 - G99.9 | -| 24 | 🤰 Pregnancy Complications | 20 | 59000 | 59899 | O00.0 - O9A.9 | -| 25 | 💉 Blood Disorders | 8 | 38200 | 38999 | D65.0 - D89.9 | -| 26 | 🏥 Hospital-Acquired Conditions | 7 | 99800 | 99899 | E87.0 - E87.9 | -| 27 | 🦴 Osteoporosis | 5 | 73300 | 73399 | M80.0 - M82.9 | -| 28 | 🤒 Infectious Diseases | 10 | 00300 | 00352 | A00.0 - A99.9 | -| 29 | 🤕 Traumatic Injuries | 9 | 11000 | 11012 | S00.0 - T98.9 | -| 30 | 🍔 Obesity | 4 | 27800 | 27899 | E66.0 - E66.9 | - - - - - -## Services - -| Service Type | CPT Code | Rules for Required Evidence of Medical Necessity | -|---------------------------|----------|-----------------------------------------------------------------------| -| Mental Health Services | 90791 | Physician referral, initial evaluation, treatment plan | -| Eye Examination | 92002 | Documented vision problems, physician referral | -| Hearing Test | 92502 | Documented hearing problems, physician referral | -| Sinus CT Scan | 31231 | Clinical history, prior relevant imaging, symptoms justification | -| Dental Surgery | 00100 | Diagnosis, treatment plan, physician referral | -| Thyroidectomy | 60210 | Diagnosis, conservative treatment history, physician referral | -| Cardiac Stress Test | 93015 | Documented symptoms, cardiac risk factors, physician referral | -| Pulmonary Function Test | 94002 | Documented respiratory issues, physician referral | -| Upper GI Endoscopy | 43200 | Documented gastrointestinal issues, physician referral | -| Liver Biopsy | 47000 | Diagnosis, treatment plan, physician referral | -| Kidney Stone Removal | 50010 | Diagnosis, conservative treatment history, physician referral | -| Adrenal Gland Surgery | 60500 | Diagnosis, conservative treatment history, physician referral | -| Pancreatic Surgery | 48100 | Diagnosis, conservative treatment history, physician referral | -| Splenectomy | 38100 | Diagnosis, conservative treatment history, physician referral | -| Colonoscopy | 44140 | Documented gastrointestinal issues, physician referral | -| Cystoscopy | 51700 | Documented urinary issues, physician referral | -| Hysterectomy | 58150 | Diagnosis, conservative treatment history, physician referral | -| Nerve Conduction Study | 95900 | Documented peripheral neuropathy, physician referral | -| Skin Biopsy | 96910 | Documented skin lesions, physician referral | -| Physical Therapy | 97110 | Physician referral, initial evaluation, treatment plan | - - - - -## Main Headings - Policy or Plan - -| Main Heading | Policy or Plan | -|--------------------------------------|----------------------------------------------------------------------| -| Service Code Grouping | Group codes based on service type or specialty | -| Listing for PA Medical Necessity | List of services requiring prior authorization for medical necessity | 
-| Approval Criteria | Guidelines and criteria for approving prior authorization requests | -| Required Evidence of Medical Necessity| Documentation needed to support medical necessity for PA requests | -| Service Codes (CPT) | Specific service codes that require prior authorization | - -## Service Code Groupings: - -| Service Type | CPT Code | Rules for Required Evidence of Medical Necessity | -|-----------------------|----------|----------------------------------------------------------------------------| -| Physical Therapy | 97001 | Physician referral, initial evaluation, treatment plan | -| Occupational Therapy | 97165 | Physician referral, initial evaluation, treatment plan | -| Speech Therapy | 92507 | Physician referral, initial evaluation, treatment plan | -| MRI Brain | 70551 | Clinical history, prior relevant imaging, symptoms justification | -| CT Scan Abdomen | 74150 | Clinical history, prior relevant imaging, symptoms justification | -| Sleep Study | 95810 | Documented sleep disorder symptoms, sleep diary, physician referral | -| Cardiac Stress Test | 93015 | Documented symptoms, cardiac risk factors, physician referral | -| Echocardiogram | 93306 | Documented symptoms, cardiac risk factors, physician referral | -| Home Health Services | 99341 | Physician referral, homebound status, plan of care | -| Infusion Therapy | 96365 | Diagnosis, medication, dosage, frequency, and duration | -| Pain Management | 64490 | Diagnosis, conservative treatment history, treatment plan | -| Bariatric Surgery | 43644 | BMI, documented weight loss attempts, physician referral, psychological evaluation | -| Joint Replacement | 27447 | Diagnosis, conservative treatment history, physician referral | -| Spinal Fusion | 22630 | Diagnosis, conservative treatment history, physician referral | -| Outpatient Surgery | 10060 | Diagnosis, procedure necessity justification, physician referral | -| Allergy Testing | 86003 | Documented allergy symptoms, treatment history, physician referral | -| Chemotherapy | 96413 | Cancer diagnosis, treatment plan, medication, dosage, and frequency | -| Radiation Therapy | 77412 | Cancer diagnosis, treatment plan, physician referral | -| Dialysis | 90935 | Diagnosis of kidney disease, treatment plan, physician referral | -| Inpatient Hospitalization | 99223 | Medical necessity for admission, diagnosis, treatment plan, physician referral | - - - - -## EDI Sample with All DX and Services - - -EDI: - - -ISA*00* *00* *ZZ*EMRSENDER *ZZ*RECEIVER *230504*1345*^*00501*000000001*0*P*:~ -GS*HS*EMRSENDER*RECEIVER*20230504*1345*1*X*005010X221A1~ -ST*278*0001~ -BHT*0078*11*100012345*20230504*1345~ -HL*1**20*1~ -NM1*X3*2*RECEIVER*****46*123456789~ -HL*2*1*21*1~ -NM1*1P*2*DOE*JANE****46*987654321~ -HL*3*2*19*1~ -TRN*1*100012345*987654321~ -UM*HS*100012345*987654321~ -HCR*A*1*ZZZ001~ -HI*BF:7295:::3~ -HI*BF:72148:::1~ -HI*BF:72156:::1~ -HI*BF:72158:::1~ -HI*BF:S72.0:::1~ -HI*BF:M16.10:::1~ -SE*13*0001~ -GE*1*1~ -IEA*1*000000001~ - - -## ADT A08 Event - -ADT: - - -MSH|^~\&|EMRSENDER|FACILITY_A|RECEIVER|FACILITY_A|20230504||ADT^A08^ADT_A01|0001|P|2.5 -EVN|A08|20230504|||BROWN^SARAH|20230504 -PID|||100012345^^^FACILITY_A^MRN||PATIENT^JOHN^M||||||||||||100012345 -PV1||I|FLOOR^1001^1^FACILITY_A||||987654321^DOE^JANE|||||||||||||||||||||||||20230504 -PV2|||^^^FACILITY_A|||||||||||||||||||||||||||||||||||||3 -PV3|3|ICD10|S72.0^M16.10^Z96.649 -PV4||O -AL1|1|||^^^72148^72156^72158 -DG1|1|ICD10|S72.0|Fracture of neck of femur|20230504 -DG1|2|ICD10|M16.10|Bilateral primary osteoarthritis of 
hip|20230504 -ZCD|3|CPT|72148^72156^72158 - - -## Clinical Document - -CCD: - -Clinical Document - -**Patient:** John Patient - -**Date:** 2023-05-04 - -**Chief Complaint:** -John experienced a fall, resulting in a **hip injury**. The patient complains of severe pain and difficulty in walking. - -**History of Present Illness:** -The patient has a history of **bilateral primary osteoarthritis of the hip (M16.10)**. The fall exacerbated the existing condition, causing **fracture of the neck of femur (S72.0)**. - -**Physical Examination:** -Upon examination, the patient had severe pain, limited range of motion, and swelling of the affected hip. Imaging studies were ordered to assess the extent of the damage. - -**Imaging Studies:** -1. **MRI of the pelvis (CPT 72148)** -2. **MRI of the bilateral hips (CPT 72156)** -3. **MRI of the bilateral femurs (CPT 72158)** - -The MRI scans revealed significant damage to the hip joint, confirming the **fracture of the neck of femur (S72.0)** and worsening of the **bilateral primary osteoarthritis of the hip (M16.10)**. - -**Assessment:** -Based on the clinical findings and imaging results, it is evident that the patient requires immediate surgical intervention to repair the hip joint and prevent further complications. - -**Plan:** -1. Admit the patient to the hospital for surgical treatment. -2. Perform an emergent **hip replacement surgery (Z96.649)** to repair the hip joint and restore function. -3. Postoperative rehabilitation and physical therapy to regain strength and mobility. - -**Medical Necessity:** -The emergent hip replacement surgery is medically necessary due to the following factors: -- Severe pain and functional impairment caused by the **fracture of the neck of femur (S72.0)** -- Worsening of pre-existing **bilateral primary osteoarthritis of the hip (M16.10)** -- High risk of complications if left untreated - -Based on the clinical evidence, the patient's condition warrants immediate surgical intervention to alleviate pain, restore function, and prevent further complications. The surgery is deemed medically necessary to improve the patient's quality of life and long-term prognosis. - -**Attending Physician:** Dr. Sarah Brown - -## Note and Summary Index by Code Type - -| Code Value | Code Description | Code Type | Decision Relevance | Additional Information | -|------------|-----------------------------------------------|-----------------|------------------------------------|---------------------------------------------| -| 7295 | Authorization and Referral Services | EDI | Required for authorization | | -| 72148 | MRI of the pelvis | CPT | Medical necessity for imaging | | -| 72156 | MRI of the bilateral hips | CPT | Medical necessity for imaging | | -| 72158 | MRI of the bilateral femurs | CPT | Medical necessity for imaging | | -| S72.0 | Fracture of the neck of femur | ICD10 | Diagnosis, treatment decision | | -| M16.10 | Bilateral primary osteoarthritis of the hip | ICD10 | Diagnosis, treatment decision | | -| Z96.649 | Presence of unspecified artificial hip joint | ICD10 | Procedure, treatment decision | | -| A08 | Update Patient Information | ADT Event | Patient update after surgery | | -| 72148 | MRI of the pelvis | ZCD | Diagnostic code for imaging | | -| 72156 | MRI of the bilateral hips | ZCD | Diagnostic code for imaging | | -| 72158 | MRI of the bilateral femurs | ZCD | Diagnostic code for imaging | | -| 987654321 | Dr. 
Jane Doe - Initial Attending Physician | NPI | Patient care provider | Taxonomy: 207Q00000X (Family Medicine) | -| 123456789 | Dr. Robert Smith - Second Attending Physician | NPI | Patient care provider | Taxonomy: 207Q00000X (Family Medicine) | -| 000000001 | Dr. Sarah Brown - Surgeon | NPI | Patient care provider, surgery | Taxonomy: 207XS0106X (Orthopedic Surgery) | -| 555444333 | Facility A | Facility NPI | Patient care facility | Name: Facility A | -| | | Facility Address| | Address: 123 Main St, City, State, Zip Code | -| J7325 | Hyaluronan or derivative, Synvisc or Synvisc-One, for intra-articular injection, 1 mg | HCPCS | Medication used during treatment | | -| Q4081 | Injection, dexamethasone sodium phosphate, 1 mg | HCPCS | Medication used during treatment | | -| 99238 | Hospital discharge day management, 30 minutes or less | CPT | Follow-up plan | | -| 99239 | Hospital discharge day management, more than 30 minutes | CPT | Follow-up plan | | - - - -""") \ No newline at end of file diff --git a/spaces/awacke1/Image-Semantic-Search/README.md b/spaces/awacke1/Image-Semantic-Search/README.md deleted file mode 100644 index ab06dcd3a0394c94f2510537b2abc8d276fc5296..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Image-Semantic-Search/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🔎NLP Image-Semantic-Search 🖼️ -emoji: 🔎Img🖼️ -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/awacke1/MixtureOfMedicalExperts/README.md b/spaces/awacke1/MixtureOfMedicalExperts/README.md deleted file mode 100644 index cc3aa42f6bdfc87f147f94e9268b00473853095f..0000000000000000000000000000000000000000 --- a/spaces/awacke1/MixtureOfMedicalExperts/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MixtureOfMedicalExperts -emoji: 🐢 -colorFrom: purple -colorTo: gray -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/banana-projects/datasets-card-creator/build/static/js/main.cbff00b9.chunk.js b/spaces/banana-projects/datasets-card-creator/build/static/js/main.cbff00b9.chunk.js deleted file mode 100644 index dd33007e9a8c36dfdfb59df44cb2100d4f2d2ff4..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/datasets-card-creator/build/static/js/main.cbff00b9.chunk.js +++ /dev/null @@ -1,2 +0,0 @@ -(this.webpackJsonpdatasetcard=this.webpackJsonpdatasetcard||[]).push([[0],{139:function(e,t,a){"use strict";a.r(t);var n=a(0),s=a(1),i=a.n(s),o=a(41),r=a.n(o),c=(a(48),a(18)),d=a(26),l=a(3),h=a.n(l),u=a(5),p=a(6);function m(e){var t=e.value,a=e.title,s=e.id,i=e.rows,o=e.handleClick,r=e.handleChange;return Object(n.jsxs)("div",{className:"",children:[Object(n.jsx)("div",{className:"text-base font-normal max-w-40 text-gray-600",children:a}),Object(n.jsx)("div",{className:"mt-2 mr-4",children:Object(n.jsx)("textarea",{value:t,onClick:function(e){return o(e)},onChange:function(e){return r(e)},id:s,name:s,rows:i,className:"font-sans p-2 shadow-sm border border-solid border-gray-300 block w-full text-gray-600 sm:text-sm rounded-md"})})]})}var f=a(28);function g(e){var t=e.title,a=e.section,s=e.handleSection;return Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)("div",{className:"mt-1",children:Object(n.jsxs)("div",{onClick:function(){return 
s()},className:"cursor-pointer flex justify-between inline-block pt-6 borders font-medium text-gray-700",children:[Object(n.jsxs)("div",{className:"",children:[" ",t," "]}),a?Object(n.jsx)(f.b,{className:"ml-2 "}):Object(n.jsx)(f.a,{className:"ml-2"})]})}),Object(n.jsx)("style",{children:"\n .borders {\n border-bottom: solid 1px;\n border-color: #e2e8f0;\n }\n "})]})}var b={name:"Instructions",instructions:{yamlTags:{paragraph:["Add YAML tags"],example:["---","annotations_creators:","- no-annotation","language_creators:","- found","languages:","- en","licenses:","- unknown","multilinguality:","- monolingual","size_categories:","- 100K THREE.NormalBlending; - - this.needsUpdate = false; - - return this; - -}; - -NodeMaterial.prototype.copy = function ( source ) { - - var uuid = this.uuid; - - for ( var name in source ) { - - this[ name ] = source[ name ]; - - } - - this.uuid = uuid; - - if ( source.userData !== undefined ) { - - this.userData = JSON.parse( JSON.stringify( source.userData ) ); - - } - -}; - -NodeMaterial.prototype.toJSON = function ( meta ) { - - var isRootObject = ( meta === undefined || typeof meta === 'string' ); - - if ( isRootObject ) { - - meta = { - nodes: {} - }; - - } - - if ( meta && ! meta.materials ) meta.materials = {}; - - if ( ! meta.materials[ this.uuid ] ) { - - var data = {}; - - data.uuid = this.uuid; - data.type = this.type; - - meta.materials[ data.uuid ] = data; - - if ( this.name !== "" ) data.name = this.name; - - if ( this.size !== undefined ) data.size = this.size; - if ( this.sizeAttenuation !== undefined ) data.sizeAttenuation = this.sizeAttenuation; - - if ( this.blending !== THREE.NormalBlending ) data.blending = this.blending; - if ( this.flatShading === true ) data.flatShading = this.flatShading; - if ( this.side !== THREE.FrontSide ) data.side = this.side; - if ( this.vertexColors !== THREE.NoColors ) data.vertexColors = this.vertexColors; - - if ( this.depthFunc !== THREE.LessEqualDepth ) data.depthFunc = this.depthFunc; - if ( this.depthTest === false ) data.depthTest = this.depthTest; - if ( this.depthWrite === false ) data.depthWrite = this.depthWrite; - - if ( this.linewidth !== 1 ) data.linewidth = this.linewidth; - if ( this.dashSize !== undefined ) data.dashSize = this.dashSize; - if ( this.gapSize !== undefined ) data.gapSize = this.gapSize; - if ( this.scale !== undefined ) data.scale = this.scale; - - if ( this.dithering === true ) data.dithering = true; - - if ( this.wireframe === true ) data.wireframe = this.wireframe; - if ( this.wireframeLinewidth > 1 ) data.wireframeLinewidth = this.wireframeLinewidth; - if ( this.wireframeLinecap !== 'round' ) data.wireframeLinecap = this.wireframeLinecap; - if ( this.wireframeLinejoin !== 'round' ) data.wireframeLinejoin = this.wireframeLinejoin; - - if ( this.alphaTest > 0 ) data.alphaTest = this.alphaTest; - if ( this.premultipliedAlpha === true ) data.premultipliedAlpha = this.premultipliedAlpha; - - if ( this.morphTargets === true ) data.morphTargets = true; - if ( this.skinning === true ) data.skinning = true; - - if ( this.visible === false ) data.visible = false; - if ( JSON.stringify( this.userData ) !== '{}' ) data.userData = this.userData; - - data.fog = this.fog; - data.lights = this.lights; - - data.vertex = this.vertex.toJSON( meta ).uuid; - data.fragment = this.fragment.toJSON( meta ).uuid; - - } - - meta.material = this.uuid; - - return meta; - -}; - -export { NodeMaterial }; diff --git a/spaces/bguberfain/Detic/detic/data/tar_dataset.py 
b/spaces/bguberfain/Detic/detic/data/tar_dataset.py deleted file mode 100644 index 0605ba3a96ab80a1212fdb1a3860337d7e7b20cc..0000000000000000000000000000000000000000 --- a/spaces/bguberfain/Detic/detic/data/tar_dataset.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -import os -import gzip -import numpy as np -import io -from PIL import Image -from torch.utils.data import Dataset - -try: - from PIL import UnidentifiedImageError - - unidentified_error_available = True -except ImportError: - # UnidentifiedImageError isn't available in older versions of PIL - unidentified_error_available = False - -class DiskTarDataset(Dataset): - def __init__(self, - tarfile_path='dataset/imagenet/ImageNet-21k/metadata/tar_files.npy', - tar_index_dir='dataset/imagenet/ImageNet-21k/metadata/tarindex_npy', - preload=False, - num_synsets="all"): - """ - - preload (bool): Recommend to set preload to False when using - - num_synsets (integer or string "all"): set to small number for debugging - will load subset of dataset - """ - tar_files = np.load(tarfile_path) - - chunk_datasets = [] - dataset_lens = [] - if isinstance(num_synsets, int): - assert num_synsets < len(tar_files) - tar_files = tar_files[:num_synsets] - for tar_file in tar_files: - dataset = _TarDataset(tar_file, tar_index_dir, preload=preload) - chunk_datasets.append(dataset) - dataset_lens.append(len(dataset)) - - self.chunk_datasets = chunk_datasets - self.dataset_lens = np.array(dataset_lens).astype(np.int32) - self.dataset_cumsums = np.cumsum(self.dataset_lens) - self.num_samples = sum(self.dataset_lens) - labels = np.zeros(self.dataset_lens.sum(), dtype=np.int64) - sI = 0 - for k in range(len(self.dataset_lens)): - assert (sI+self.dataset_lens[k]) <= len(labels), f"{k} {sI+self.dataset_lens[k]} vs. {len(labels)}" - labels[sI:(sI+self.dataset_lens[k])] = k - sI += self.dataset_lens[k] - self.labels = labels - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - assert index >= 0 and index < len(self) - # find the dataset file we need to go to - d_index = np.searchsorted(self.dataset_cumsums, index) - - # edge case, if index is at edge of chunks, move right - if index in self.dataset_cumsums: - d_index += 1 - - assert d_index == self.labels[index], f"{d_index} vs. 
{self.labels[index]} mismatch for {index}" - - # change index to local dataset index - if d_index == 0: - local_index = index - else: - local_index = index - self.dataset_cumsums[d_index - 1] - data_bytes = self.chunk_datasets[d_index][local_index] - exception_to_catch = UnidentifiedImageError if unidentified_error_available else Exception - try: - image = Image.open(data_bytes).convert("RGB") - except exception_to_catch: - image = Image.fromarray(np.ones((224,224,3), dtype=np.uint8)*128) - d_index = -1 - - # label is the dataset (synset) we indexed into - return image, d_index, index - - def __repr__(self): - st = f"DiskTarDataset(subdatasets={len(self.dataset_lens)},samples={self.num_samples})" - return st - -class _TarDataset(object): - - def __init__(self, filename, npy_index_dir, preload=False): - # translated from - # fbcode/experimental/deeplearning/matthijs/comp_descs/tardataset.lua - self.filename = filename - self.names = [] - self.offsets = [] - self.npy_index_dir = npy_index_dir - names, offsets = self.load_index() - - self.num_samples = len(names) - if preload: - self.data = np.memmap(filename, mode='r', dtype='uint8') - self.offsets = offsets - else: - self.data = None - - - def __len__(self): - return self.num_samples - - def load_index(self): - basename = os.path.basename(self.filename) - basename = os.path.splitext(basename)[0] - names = np.load(os.path.join(self.npy_index_dir, f"{basename}_names.npy")) - offsets = np.load(os.path.join(self.npy_index_dir, f"{basename}_offsets.npy")) - return names, offsets - - def __getitem__(self, idx): - if self.data is None: - self.data = np.memmap(self.filename, mode='r', dtype='uint8') - _, self.offsets = self.load_index() - - ofs = self.offsets[idx] * 512 - fsize = 512 * (self.offsets[idx + 1] - self.offsets[idx]) - data = self.data[ofs:ofs + fsize] - - if data[:13].tostring() == '././@LongLink': - data = data[3 * 512:] - else: - data = data[512:] - - # just to make it more fun a few JPEGs are GZIP compressed... - # catch this case - if tuple(data[:2]) == (0x1f, 0x8b): - s = io.BytesIO(data.tostring()) - g = gzip.GzipFile(None, 'r', 0, s) - sdata = g.read() - else: - sdata = data.tostring() - return io.BytesIO(sdata) \ No newline at end of file diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/iou_matching.py b/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/iou_matching.py deleted file mode 100644 index 62d5a3f63b70db5e322b6f8766444dd824c010ae..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/iou_matching.py +++ /dev/null @@ -1,82 +0,0 @@ -# vim: expandtab:ts=4:sw=4 -from __future__ import absolute_import -import numpy as np -from . import linear_assignment - - -def iou(bbox, candidates): - """Computer intersection over union. - - Parameters - ---------- - bbox : ndarray - A bounding box in format `(top left x, top left y, width, height)`. - candidates : ndarray - A matrix of candidate bounding boxes (one per row) in the same format - as `bbox`. - - Returns - ------- - ndarray - The intersection over union in [0, 1] between the `bbox` and each - candidate. A higher score means a larger fraction of the `bbox` is - occluded by the candidate. 
- - """ - bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:] - candidates_tl = candidates[:, :2] - candidates_br = candidates[:, :2] + candidates[:, 2:] - - tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis], - np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]] - br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis], - np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]] - wh = np.maximum(0., br - tl) - - area_intersection = wh.prod(axis=1) - area_bbox = bbox[2:].prod() - area_candidates = candidates[:, 2:].prod(axis=1) - return area_intersection / (area_bbox + area_candidates - area_intersection) - - -def iou_cost(tracks, detections, track_indices=None, - detection_indices=None): - """An intersection over union distance metric. - - Parameters - ---------- - tracks : List[deep_sort.track.Track] - A list of tracks. - detections : List[deep_sort.detection.Detection] - A list of detections. - track_indices : Optional[List[int]] - A list of indices to tracks that should be matched. Defaults to - all `tracks`. - detection_indices : Optional[List[int]] - A list of indices to detections that should be matched. Defaults - to all `detections`. - - Returns - ------- - ndarray - Returns a cost matrix of shape - len(track_indices), len(detection_indices) where entry (i, j) is - `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`. - - """ - if track_indices is None: - track_indices = np.arange(len(tracks)) - if detection_indices is None: - detection_indices = np.arange(len(detections)) - - cost_matrix = np.zeros((len(track_indices), len(detection_indices))) - for row, track_idx in enumerate(track_indices): - if tracks[track_idx].time_since_update > 1: - cost_matrix[row, :] = linear_assignment.INFTY_COST - continue - - bbox = tracks[track_idx].to_tlwh() - candidates = np.asarray( - [detections[i].tlwh for i in detection_indices]) - cost_matrix[row, :] = 1. - iou(bbox, candidates) - return cost_matrix diff --git a/spaces/binery/Table_Transformer_PaddleOCR/README.md b/spaces/binery/Table_Transformer_PaddleOCR/README.md deleted file mode 100644 index bb4430c7c3ee09164ac861add96caede3c81578e..0000000000000000000000000000000000000000 --- a/spaces/binery/Table_Transformer_PaddleOCR/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Table Transformer PaddleOCR -emoji: 😻 -colorFrom: green -colorTo: pink -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bioriAsaeru/text-to-voice/How to Create Amazing Oriental Music with Findasound Oriental Soloist 2 KONTAKT.md b/spaces/bioriAsaeru/text-to-voice/How to Create Amazing Oriental Music with Findasound Oriental Soloist 2 KONTAKT.md deleted file mode 100644 index 4f15606d22b341ca759bb1f300c321f7acf3f4cc..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/How to Create Amazing Oriental Music with Findasound Oriental Soloist 2 KONTAKT.md +++ /dev/null @@ -1,6 +0,0 @@ -

FindasoundOrientalSoloist2KONTAKT
DOWNLOAD https://urloso.com/2uyRVD
- aaccfb2cb3
-
-
-

diff --git a/spaces/blmdsydm/faster-whisper-webui/docs/colab.md b/spaces/blmdsydm/faster-whisper-webui/docs/colab.md deleted file mode 100644 index 3fcdb835327238764fb643b9bbd2e27b6e14f58c..0000000000000000000000000000000000000000 --- a/spaces/blmdsydm/faster-whisper-webui/docs/colab.md +++ /dev/null @@ -1,20 +0,0 @@ -# Running Whisper on Google Colab - -If you don't have a decent GPU or any experience in running command-line applications, you might want to try this Google Colab instead: - -* [Google Colab - Whisper WebUI GPU](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing) -* [Screenshots](https://imgur.com/a/ZfY6uBO) - -The runtime (Runtime -> Change runtime type -> Hardware accelerator) should already be set top GPU. But if not, change it to GPU. - -Then, sign in to Google if you haven't already. Next, click on "Connect" at the top right. - -Under "Checking out WebUI from Git", click on the [play icon](https://imgur.com/a/81gOLyD) that appears in "[ ]" at the left. If you get a warning, click "Run anyway". - -After this step has completed, it should be get a green check mark. Then move on to the next section under "Installing dependencies", and click in "[ ]" again. This might take approximately 30 seconds. - -Once this has completed, scroll down to the "Run WebUI" section, and click on "[ ]". This will launch the WebUI in a shared link (expires in 72 hours). To open the UI, click on the link next to "Running on public URL", which will be something like https://12xxx.gradio.app/ - -The audio length in this version is not restricted, and it will run much faster as it is backed by a GPU. You can also run it using the "Large" model. Also note that it might take some time to start the model the first time, as it may need to download a 2.8 GB file on Google's servers. - -Once you're done, you can close the WebUI session by clicking the animated close button under "Run WebUI". You can also do this if you encounter any errors and need to restart the UI. You should also go to "Manage Sessions" and terminate the session, otherwise you may end up using all your free compute credits. \ No newline at end of file diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/grids/compression/encodec_base_24khz.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/grids/compression/encodec_base_24khz.py deleted file mode 100644 index 117b2b1e496ca31b3d614672b472c9213cedb4ad..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/grids/compression/encodec_base_24khz.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Grid search file, simply list all the exp you want in `explorer`. -Any new exp added there will be scheduled. -You can cancel and experiment by commenting its line. - -This grid shows how to train a base causal EnCodec model at 24 kHz. 
-""" - -from ._explorers import CompressionExplorer -from ...environment import AudioCraftEnvironment - - -@CompressionExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=8, partition=partitions) - # base causal EnCodec trained on monophonic audio sampled at 24 kHz - launcher.bind_(solver='compression/encodec_base_24khz') - # replace this by the desired dataset - launcher.bind_(dset='audio/example') - # launch xp - launcher() diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/solver/lr_scheduler.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/solver/lr_scheduler.py deleted file mode 100644 index 01e1eb7854a9662b9595a7ffa9b0e484faf34dff..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/solver/lr_scheduler.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import math -from bisect import bisect_right -from typing import List -import torch -from fvcore.common.param_scheduler import ( - CompositeParamScheduler, - ConstantParamScheduler, - LinearParamScheduler, - ParamScheduler, -) - -try: - from torch.optim.lr_scheduler import LRScheduler -except ImportError: - from torch.optim.lr_scheduler import _LRScheduler as LRScheduler - -logger = logging.getLogger(__name__) - - -class WarmupParamScheduler(CompositeParamScheduler): - """ - Add an initial warmup stage to another scheduler. - """ - - def __init__( - self, - scheduler: ParamScheduler, - warmup_factor: float, - warmup_length: float, - warmup_method: str = "linear", - rescale_interval: bool = False, - ): - """ - Args: - scheduler: warmup will be added at the beginning of this scheduler - warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001 - warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire - training, e.g. 0.01 - warmup_method: one of "linear" or "constant" - rescale_interval: whether we will rescale the interval of the scheduler after - warmup - """ - # the value to reach when warmup ends - end_value = scheduler(0.0) if rescale_interval else scheduler(warmup_length) - start_value = warmup_factor * scheduler(0.0) - if warmup_method == "constant": - warmup = ConstantParamScheduler(start_value) - elif warmup_method == "linear": - warmup = LinearParamScheduler(start_value, end_value) - else: - raise ValueError("Unknown warmup method: {}".format(warmup_method)) - super().__init__( - [warmup, scheduler], - interval_scaling=["rescaled", "rescaled" if rescale_interval else "fixed"], - lengths=[warmup_length, 1 - warmup_length], - ) - - -class LRMultiplier(LRScheduler): - """ - A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the - learning rate of each param in the optimizer. - Every step, the learning rate of each parameter becomes its initial value - multiplied by the output of the given :class:`ParamScheduler`. - - The absolute learning rate value of each parameter can be different. - This scheduler can be used as long as the relative scale among them do - not change during training. - - Examples: - :: - LRMultiplier( - opt, - WarmupParamScheduler( - MultiStepParamScheduler( - [1, 0.1, 0.01], - milestones=[60000, 80000], - num_updates=90000, - ), 0.001, 100 / 90000 - ), - max_iter=90000 - ) - """ - - # NOTES: in the most general case, every LR can use its own scheduler. 
- # Supporting this requires interaction with the optimizer when its parameter - # group is initialized. For example, classyvision implements its own optimizer - # that allows different schedulers for every parameter group. - # To avoid this complexity, we use this class to support the most common cases - # where the relative scale among all LRs stay unchanged during training. In this - # case we only need a total of one scheduler that defines the relative LR multiplier. - - def __init__( - self, - optimizer: torch.optim.Optimizer, - multiplier: ParamScheduler, - max_iter: int, - last_iter: int = -1, - ): - """ - Args: - optimizer, last_iter: See ``torch.optim.lr_scheduler.LRScheduler``. - ``last_iter`` is the same as ``last_epoch``. - multiplier: a fvcore ParamScheduler that defines the multiplier on - every LR of the optimizer - max_iter: the total number of training iterations - """ - if not isinstance(multiplier, ParamScheduler): - raise ValueError( - "_LRMultiplier(multiplier=) must be an instance of fvcore " - f"ParamScheduler. Got {multiplier} instead." - ) - self._multiplier = multiplier - self._max_iter = max_iter - super().__init__(optimizer, last_epoch=last_iter) - - def state_dict(self): - # fvcore schedulers are stateless. Only keep pytorch scheduler states - return {"base_lrs": self.base_lrs, "last_epoch": self.last_epoch} - - def get_lr(self) -> List[float]: - multiplier = self._multiplier(self.last_epoch / self._max_iter) - return [base_lr * multiplier for base_lr in self.base_lrs] - - -""" -Content below is no longer needed! -""" - -# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes -# only on epoch boundaries. We typically use iteration based schedules instead. -# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean -# "iteration" instead. - -# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating -# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. - - -class WarmupMultiStepLR(LRScheduler): - def __init__( - self, - optimizer: torch.optim.Optimizer, - milestones: List[int], - gamma: float = 0.1, - warmup_factor: float = 0.001, - warmup_iters: int = 1000, - warmup_method: str = "linear", - last_epoch: int = -1, - ): - logger.warning( - "WarmupMultiStepLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!" - ) - if not list(milestones) == sorted(milestones): - raise ValueError( - "Milestones should be a list of" " increasing integers. Got {}", milestones - ) - self.milestones = milestones - self.gamma = gamma - self.warmup_factor = warmup_factor - self.warmup_iters = warmup_iters - self.warmup_method = warmup_method - super().__init__(optimizer, last_epoch) - - def get_lr(self) -> List[float]: - warmup_factor = _get_warmup_factor_at_iter( - self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor - ) - return [ - base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) - for base_lr in self.base_lrs - ] - - def _compute_values(self) -> List[float]: - # The new interface - return self.get_lr() - - -class WarmupCosineLR(LRScheduler): - def __init__( - self, - optimizer: torch.optim.Optimizer, - max_iters: int, - warmup_factor: float = 0.001, - warmup_iters: int = 1000, - warmup_method: str = "linear", - last_epoch: int = -1, - ): - logger.warning( - "WarmupCosineLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!" 
- ) - self.max_iters = max_iters - self.warmup_factor = warmup_factor - self.warmup_iters = warmup_iters - self.warmup_method = warmup_method - super().__init__(optimizer, last_epoch) - - def get_lr(self) -> List[float]: - warmup_factor = _get_warmup_factor_at_iter( - self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor - ) - # Different definitions of half-cosine with warmup are possible. For - # simplicity we multiply the standard half-cosine schedule by the warmup - # factor. An alternative is to start the period of the cosine at warmup_iters - # instead of at 0. In the case that warmup_iters << max_iters the two are - # very close to each other. - return [ - base_lr - * warmup_factor - * 0.5 - * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) - for base_lr in self.base_lrs - ] - - def _compute_values(self) -> List[float]: - # The new interface - return self.get_lr() - - -def _get_warmup_factor_at_iter( - method: str, iter: int, warmup_iters: int, warmup_factor: float -) -> float: - """ - Return the learning rate warmup factor at a specific iteration. - See :paper:`ImageNet in 1h` for more details. - - Args: - method (str): warmup method; either "constant" or "linear". - iter (int): iteration at which to calculate the warmup factor. - warmup_iters (int): the number of warmup iterations. - warmup_factor (float): the base warmup factor (the meaning changes according - to the method used). - - Returns: - float: the effective warmup factor at the given iteration. - """ - if iter >= warmup_iters: - return 1.0 - - if method == "constant": - return warmup_factor - elif method == "linear": - alpha = iter / warmup_iters - return warmup_factor * (1 - alpha) + alpha - else: - raise ValueError("Unknown warmup method: {}".format(method)) diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/chart_result.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/chart_result.py deleted file mode 100644 index 003933d03d153d045c0bf551c465bc7a224d90cb..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/structures/chart_result.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from dataclasses import dataclass -from typing import Any, Optional, Tuple -import torch - - -@dataclass -class DensePoseChartResult: - """ - DensePose results for chart-based methods represented by labels and inner - coordinates (U, V) of individual charts. Each chart is a 2D manifold - that has an associated label and is parameterized by two coordinates U and V. - Both U and V take values in [0, 1]. 
- Thus the results are represented by two tensors: - - labels (tensor [H, W] of long): contains estimated label for each pixel of - the detection bounding box of size (H, W) - - uv (tensor [2, H, W] of float): contains estimated U and V coordinates - for each pixel of the detection bounding box of size (H, W) - """ - - labels: torch.Tensor - uv: torch.Tensor - - def to(self, device: torch.device): - """ - Transfers all tensors to the given device - """ - labels = self.labels.to(device) - uv = self.uv.to(device) - return DensePoseChartResult(labels=labels, uv=uv) - - -@dataclass -class DensePoseChartResultWithConfidences: - """ - We add confidence values to DensePoseChartResult - Thus the results are represented by two tensors: - - labels (tensor [H, W] of long): contains estimated label for each pixel of - the detection bounding box of size (H, W) - - uv (tensor [2, H, W] of float): contains estimated U and V coordinates - for each pixel of the detection bounding box of size (H, W) - Plus one [H, W] tensor of float for each confidence type - """ - - labels: torch.Tensor - uv: torch.Tensor - sigma_1: Optional[torch.Tensor] = None - sigma_2: Optional[torch.Tensor] = None - kappa_u: Optional[torch.Tensor] = None - kappa_v: Optional[torch.Tensor] = None - fine_segm_confidence: Optional[torch.Tensor] = None - coarse_segm_confidence: Optional[torch.Tensor] = None - - def to(self, device: torch.device): - """ - Transfers all tensors to the given device, except if their value is None - """ - - def to_device_if_tensor(var: Any): - if isinstance(var, torch.Tensor): - return var.to(device) - return var - - return DensePoseChartResultWithConfidences( - labels=self.labels.to(device), - uv=self.uv.to(device), - sigma_1=to_device_if_tensor(self.sigma_1), - sigma_2=to_device_if_tensor(self.sigma_2), - kappa_u=to_device_if_tensor(self.kappa_u), - kappa_v=to_device_if_tensor(self.kappa_v), - fine_segm_confidence=to_device_if_tensor(self.fine_segm_confidence), - coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence), - ) - - -@dataclass -class DensePoseChartResultQuantized: - """ - DensePose results for chart-based methods represented by labels and quantized - inner coordinates (U, V) of individual charts. Each chart is a 2D manifold - that has an associated label and is parameterized by two coordinates U and V. - Both U and V take values in [0, 1]. - Quantized coordinates Uq and Vq have uint8 values which are obtained as: - Uq = U * 255 (hence 0 <= Uq <= 255) - Vq = V * 255 (hence 0 <= Vq <= 255) - Thus the results are represented by one tensor: - - labels_uv_uint8 (tensor [3, H, W] of uint8): contains estimated label - and quantized coordinates Uq and Vq for each pixel of the detection - bounding box of size (H, W) - """ - - labels_uv_uint8: torch.Tensor - - def to(self, device: torch.device): - """ - Transfers all tensors to the given device - """ - labels_uv_uint8 = self.labels_uv_uint8.to(device) - return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8) - - -@dataclass -class DensePoseChartResultCompressed: - """ - DensePose results for chart-based methods represented by a PNG-encoded string. - The tensor of quantized DensePose results of size [3, H, W] is considered - as an image with 3 color channels. PNG compression is applied and the result - is stored as a Base64-encoded string. 
The following attributes are defined: - - shape_chw (tuple of 3 int): contains shape of the result tensor - (number of channels, height, width) - - labels_uv_str (str): contains Base64-encoded results tensor of size - [3, H, W] compressed with PNG compression methods - """ - - shape_chw: Tuple[int, int, int] - labels_uv_str: str - - -def quantize_densepose_chart_result(result: DensePoseChartResult) -> DensePoseChartResultQuantized: - """ - Applies quantization to DensePose chart-based result. - - Args: - result (DensePoseChartResult): DensePose chart-based result - Return: - Quantized DensePose chart-based result (DensePoseChartResultQuantized) - """ - h, w = result.labels.shape - labels_uv_uint8 = torch.zeros([3, h, w], dtype=torch.uint8, device=result.labels.device) - labels_uv_uint8[0] = result.labels - labels_uv_uint8[1:] = (result.uv * 255).clamp(0, 255).byte() - return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8) - - -def compress_quantized_densepose_chart_result( - result: DensePoseChartResultQuantized, -) -> DensePoseChartResultCompressed: - """ - Compresses quantized DensePose chart-based result - - Args: - result (DensePoseChartResultQuantized): quantized DensePose chart-based result - Return: - Compressed DensePose chart-based result (DensePoseChartResultCompressed) - """ - import base64 - import numpy as np - from io import BytesIO - from PIL import Image - - labels_uv_uint8_np_chw = result.labels_uv_uint8.cpu().numpy() - labels_uv_uint8_np_hwc = np.moveaxis(labels_uv_uint8_np_chw, 0, -1) - im = Image.fromarray(labels_uv_uint8_np_hwc) - fstream = BytesIO() - im.save(fstream, format="png", optimize=True) - labels_uv_str = base64.encodebytes(fstream.getvalue()).decode() - shape_chw = labels_uv_uint8_np_chw.shape - return DensePoseChartResultCompressed(labels_uv_str=labels_uv_str, shape_chw=shape_chw) - - -def decompress_compressed_densepose_chart_result( - result: DensePoseChartResultCompressed, -) -> DensePoseChartResultQuantized: - """ - Decompresses DensePose chart-based result encoded into a base64 string - - Args: - result (DensePoseChartResultCompressed): compressed DensePose chart result - Return: - Quantized DensePose chart-based result (DensePoseChartResultQuantized) - """ - import base64 - import numpy as np - from io import BytesIO - from PIL import Image - - fstream = BytesIO(base64.decodebytes(result.labels_uv_str.encode())) - im = Image.open(fstream) - labels_uv_uint8_np_chw = np.moveaxis(np.array(im, dtype=np.uint8), -1, 0) - return DensePoseChartResultQuantized( - labels_uv_uint8=torch.from_numpy(labels_uv_uint8_np_chw.reshape(result.shape_chw)) - ) diff --git a/spaces/bzd4576/sovits-sin/README.md b/spaces/bzd4576/sovits-sin/README.md deleted file mode 100644 index e6cb9d05399898afcc4ec4e0c9f4b6ee39c80349..0000000000000000000000000000000000000000 --- a/spaces/bzd4576/sovits-sin/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sovits Sin -emoji: 📚 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.3 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cadige/03-Streamlit-Video/streaming.py b/spaces/cadige/03-Streamlit-Video/streaming.py deleted file mode 100644 index cc2048269b3e9ac09886471ef9b6dc681db09f25..0000000000000000000000000000000000000000 --- a/spaces/cadige/03-Streamlit-Video/streaming.py +++ /dev/null @@ -1,66 +0,0 @@ -import subprocess - -import numpy as np - - -def ffmpeg_stream(youtube_url, 
sampling_rate=16_000, chunk_duration_ms=5000, pad_duration_ms=200): - """ - Helper function to read an audio file through ffmpeg. - """ - chunk_len = int(sampling_rate * chunk_duration_ms / 1000) - pad_len = int(sampling_rate * pad_duration_ms / 1000) - read_chunk_len = chunk_len + pad_len * 2 - - ar = f"{sampling_rate}" - ac = "1" - format_for_conversion = "f32le" - dtype = np.float32 - size_of_sample = 4 - - ffmpeg_command = [ - "ffmpeg", - "-i", - "pipe:", - "-ac", - ac, - "-ar", - ar, - "-f", - format_for_conversion, - "-hide_banner", - "-loglevel", - "quiet", - "pipe:1", - ] - - ytdl_command = ["yt-dlp", "-f", "bestaudio", youtube_url, "--quiet", "-o", "-"] - - try: - ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1) - ytdl_process = subprocess.Popen(ytdl_command, stdout=ffmpeg_process.stdin) - except FileNotFoundError: - raise ValueError("ffmpeg was not found but is required to stream audio files from filename") - - acc = b"" - leftover = np.zeros((0,), dtype=np.float32) - while ytdl_process.poll() is None: - buflen = read_chunk_len * size_of_sample - - raw = ffmpeg_process.stdout.read(buflen) - if raw == b"": - break - - if len(acc) + len(raw) > buflen: - acc = raw - else: - acc += raw - - audio = np.frombuffer(acc, dtype=dtype) - audio = np.concatenate([leftover, audio]) - if len(audio) < pad_len * 2: - # TODO: handle end of stream better than this - break - yield audio - - leftover = audio[-pad_len * 2 :] - read_chunk_len = chunk_len \ No newline at end of file diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py deleted file mode 100644 index 5f05da03514a4ee6aa37d6bc3e678873ead73c61..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py +++ /dev/null @@ -1,3 +0,0 @@ -from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train - -model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "SyncBN" diff --git a/spaces/cheetah003/HMMC_t2v_search/modules/modeling.py b/spaces/cheetah003/HMMC_t2v_search/modules/modeling.py deleted file mode 100644 index 36fb3e1286ce64d42da2d3584eff171ab3b220a3..0000000000000000000000000000000000000000 --- a/spaces/cheetah003/HMMC_t2v_search/modules/modeling.py +++ /dev/null @@ -1,507 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import logging -import torch -from torch import nn -import numpy as np -import torch.nn.functional as F -from transformers import AutoConfig, AutoModel, BertTokenizer -from modules.tokenization_clip import SimpleTokenizer as ClipTokenizer -from modules.until_module import PreTrainedModel, AllGather, CrossEn, Dual_CrossEn -from modules.module_cross import TextEncoder, VisualEncoder, CrossConfig, BertLMPredictionHead - -logger = logging.getLogger(__name__) -allgather = AllGather.apply - - -class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module): - """ An abstract class to handle weights initialization and - a simple interface for dowloading and loading pretrained models. 
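# --- Illustrative sketch, not from the original repo: ffmpeg_stream above
# --- yields overlapping audio windows -- the first read grabs
# --- chunk_len + 2 * pad_len samples, every later read grabs chunk_len samples
# --- and prepends the trailing 2 * pad_len samples of the previous window as
# --- context.  The same pattern applied to an in-memory array (the sizes here
# --- are made up for the example):
import numpy as np

def overlapping_chunks(signal: np.ndarray, chunk_len: int, pad_len: int):
    """Yield windows of `signal` that overlap by 2 * pad_len samples."""
    leftover = np.zeros((0,), dtype=signal.dtype)
    pos = 0
    read_len = chunk_len + 2 * pad_len            # first window carries padding on both sides
    while pos < len(signal):
        block = signal[pos:pos + read_len]
        pos += read_len
        window = np.concatenate([leftover, block])
        yield window
        leftover = window[-pad_len * 2:]          # context carried into the next window
        read_len = chunk_len                      # later reads are plain chunks

sr = 16_000
audio = np.random.randn(12 * sr).astype(np.float32)   # 12 s of fake audio
for i, window in enumerate(overlapping_chunks(audio, chunk_len=5 * sr, pad_len=sr // 5)):
    print(i, window.shape)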
- """ - - def __init__(self, cross_config, *inputs, **kwargs): - super(CLIP4ClipPreTrainedModel, self).__init__(cross_config) - self.cross_config = cross_config - - @classmethod - def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs): - - task_config = None - if "task_config" in kwargs.keys(): - task_config = kwargs["task_config"] - if not hasattr(task_config, "local_rank"): - task_config.__dict__["local_rank"] = 0 - elif task_config.local_rank == -1: - task_config.local_rank = 0 - - cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, - task_config=task_config) - - model = cls(cross_config, *inputs, **kwargs) - - if state_dict is not None: - model = cls.init_preweight(model, state_dict, task_config=task_config) - - return model - - -def show_log(task_config, info): - if task_config is None or task_config.local_rank == 0: - logger.warning(info) - - -def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None): - if hasattr(source_config, source_attr_name): - if default_value is None or getattr(source_config, source_attr_name) != default_value: - setattr(target_config, target_attr_name, getattr(source_config, source_attr_name)) - show_log(source_config, "Set {}.{}: {}.".format(target_name, - target_attr_name, getattr(target_config, target_attr_name))) - return target_config - - -def check_attr(target_name, task_config): - return hasattr(task_config, target_name) and task_config.__dict__[target_name] - - -class BirdPreTrainedModel(CLIP4ClipPreTrainedModel): - def __init__(self, cross_config, task_config): - super(BirdPreTrainedModel, self).__init__(cross_config) - self.task_config = task_config - self.rank = task_config.local_rank - self.mlm_probability = cross_config.mlm_probability - self.top_frames = task_config.top_frames - # self.weight_sum = torch.nn.Parameter(torch.tensor([0.5], dtype=torch.float32), requires_grad=True) - self.weight_FAM = cross_config.weight_FAM - self.weight_VTM = cross_config.weight_VTM - self.weight_FTM = cross_config.weight_FTM - self.weight_MLM = cross_config.weight_MLM - self.contrast_momentum = task_config.contrast_momentum - self.contrast_temperature = task_config.contrast_temperature - self.contrast_num_negative = task_config.contrast_num_negative - ################## chinese text Encoder - if self.task_config.language == "chinese": - self.tokenizer = BertTokenizer.from_pretrained(self.task_config.pretrained_text) - else: - self.tokenizer = ClipTokenizer() - if self.rank == 0: - logger.info("voacb_size:{}".format(self.tokenizer.vocab_size)) - t_config = AutoConfig.from_pretrained(self.task_config.pretrained_text) - self.text_encoder = TextEncoder(self.task_config, cross_config) - self.text_encoder_k = TextEncoder(self.task_config, cross_config) - self.t_projector = MLP(num_layers=cross_config.proj_num_layers) - self.t_projector_k = MLP(num_layers=cross_config.proj_num_layers) - nn.SyncBatchNorm.convert_sync_batchnorm(self.t_projector) - nn.SyncBatchNorm.convert_sync_batchnorm(self.t_projector_k) - # for MLM - t_config.hidden_size = cross_config.temporal_hidden_size - t_config.vocab_size = self.tokenizer.vocab_size - self.cls = BertLMPredictionHead(t_config) - ################## visual_encoder - self.visual_encoder = VisualEncoder(self.task_config, cross_config) - self.visual_encoder_k = VisualEncoder(self.task_config, cross_config) - self.v_projector = MLP(num_layers=cross_config.proj_num_layers) 
- self.v_projector_k = MLP(num_layers=cross_config.proj_num_layers) - self.v_predictor = MLP(num_layers=cross_config.pred_num_layers) - nn.SyncBatchNorm.convert_sync_batchnorm(self.v_projector) - nn.SyncBatchNorm.convert_sync_batchnorm(self.v_projector_k) - nn.SyncBatchNorm.convert_sync_batchnorm(self.v_predictor) - ################# momemtun mdoel pairs - self.model_pairs = [[self.visual_encoder, self.visual_encoder_k], - [self.text_encoder, self.text_encoder_k], - [self.v_projector, self.v_projector_k], - [self.t_projector, self.t_projector_k], - ] - self.copy_params() - ################## create queue - self.register_buffer("queue_v_cross_ng", torch.randn(cross_config.temporal_hidden_size, self.contrast_num_negative)) - self.register_buffer("queue_frame_proj_ng", torch.randn(cross_config.temporal_hidden_size, - self.contrast_num_negative * self.task_config.max_frames)) - self.register_buffer("queue_frame_cross_ng", torch.randn(cross_config.temporal_hidden_size, - self.contrast_num_negative * self.task_config.max_frames)) - self.register_buffer("queue_title_cross_ng", torch.randn(cross_config.temporal_hidden_size, self.contrast_num_negative)) - self.register_buffer("queue_tag_cross_ng", torch.randn(cross_config.temporal_hidden_size, self.contrast_num_negative)) - self.queue_v_cross_ng = F.normalize(self.queue_v_cross_ng, dim=0) - self.queue_frame_proj_ng = F.normalize(self.queue_frame_proj_ng, dim=0) - self.queue_frame_cross_ng = F.normalize(self.queue_frame_cross_ng, dim=0) - self.queue_title_cross_ng = F.normalize(self.queue_title_cross_ng, dim=0) - self.queue_tag_cross_ng = F.normalize(self.queue_tag_cross_ng, dim=0) - - self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long)) - - ################## loss function - self.loss_fct = CrossEn() - self.loss_fct_dual = Dual_CrossEn() - - - # self.apply(self.init_weights) - - def get_mlm_loss(self, input_ids, input_mask): - to_mask_input_ids = input_ids.clone() - input_labels = to_mask_input_ids.clone() - input_probability_matrix = torch.full(input_labels.shape, self.mlm_probability) - masked_input_ids, input_labels = self.mask(to_mask_input_ids, self.tokenizer.vocab_size, - input_mask.device, targets=input_labels, - probability_matrix=input_probability_matrix) - masked_input_output = self.text_encoder(masked_input_ids, input_mask, return_hidden=True) - mlm_input_loss = self.calculate_mlm_loss(masked_input_output, input_labels) - return mlm_input_loss - - def calculate_mlm_loss(self, sequence_output_mlm, labels): - - mlm_scores = self.cls(sequence_output_mlm) - # logger.info("sequence_output_mlm.shape:{}".format(sequence_output_mlm.shape)) - # logger.info("mlm_scores.shape:{}".format(mlm_scores.shape)) - # logger.info("labels.shape:{}".format(labels.shape)) - mlm_loss = F.cross_entropy(mlm_scores.view(-1, self.tokenizer.vocab_size), - labels.view(-1), ignore_index=-100) - return mlm_loss - - def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None): - if masked_indices is None: - masked_indices = torch.bernoulli(probability_matrix).bool() - - masked_indices[input_ids == self.tokenizer.pad_token_id] = False - masked_indices[input_ids == self.tokenizer.cls_token_id] = False - # logger.info("masked_indices:{}".format(masked_indices)) - # logger.info("masked_indices.shape:{}".format(masked_indices.shape)) - if targets is not None: - targets[~masked_indices] = -100 # We only compute loss on masked tokens - - # 80% of the time, we replace masked input tokens with tokenizer.mask_token 
([MASK]) - indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices - input_ids[indices_replaced] = self.tokenizer.mask_token_id - - # 10% of the time, we replace masked input tokens with random word - indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced - random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device) - input_ids[indices_random] = random_words[indices_random] - # The rest of the time (10% of the time) we keep the masked input tokens unchanged - - if targets is not None: - return input_ids, targets - else: - return input_ids - - def loose_similarity(self, sequence_output, visual_output): - sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous() - - visual_output = visual_output.squeeze() - visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True) - - sequence_output = sequence_output.squeeze() - sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True) - - logit_scale = self.text_encoder.logit_scale.exp() - logit_scale.data = torch.clamp(logit_scale.data, max=100) - # if self.rank == 0: - # logger.info("logit_scale:{},dtype:{}".format(logit_scale, logit_scale.dtype)) - # logger.info("sequence_output.shape:{}".format(sequence_output.shape)) - # logger.info("visual_output.shape:{}".format(visual_output.shape)) - if len(visual_output.shape) == 2: - retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t()) - else: - visual_temp = visual_output.permute(0, 2, 1) - retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_temp) - if len(retrieve_logits.shape) == 3: - retrieve_logits = retrieve_logits.permute(1, 0, 2) - - return retrieve_logits - - @torch.no_grad() - def copy_params(self): - for model_pair in self.model_pairs: - for param, param_k in zip(model_pair[0].parameters(), model_pair[1].parameters()): - param_k.data.copy_(param.data) # initialize - param_k.requires_grad = False # not update by gradient - - @torch.no_grad() - def _momentum_update(self): - for model_pair in self.model_pairs: - for param, param_k in zip(model_pair[0].parameters(), model_pair[1].parameters()): - param_k.data = param_k.data * self.contrast_momentum + param.data * (1. 
- self.contrast_momentum) - - @torch.no_grad() - def _dequeue_and_enqueue(self, v_fea_k, tag_fea_k, title_fea_k, frame_fea_k, frame_proj_k): - - # gather keys before updating queue - # [bs,hidden] - v_fea_k = F.normalize(v_fea_k, dim=1) - tag_fea_k = F.normalize(tag_fea_k, dim=1) - title_fea_k = F.normalize(title_fea_k, dim=1) - # [bs,frame,hidden] - frame_fea_k = F.normalize(frame_fea_k, dim=2) - frame_proj_k = F.normalize(frame_proj_k, dim=2) - - batch_size = v_fea_k.size(0) - frame_num = frame_fea_k.size(1) - frame_fea_k = frame_fea_k.view(-1, frame_fea_k.size(-1)) - frame_proj_k = frame_proj_k.view(-1, frame_proj_k.size(-1)) - - ptr = int(self.queue_ptr) - # if self.rank == 0: - # logger.info( - # "begin>>>>: ptr:{},batch_size:{},frame_num:{},queue_size:{}".format(ptr, batch_size, frame_num, self.contrast_num_negative)) - # logger.info("v1_self_k.shape:{},tag_cross_k.shape:{},frame_proj_k.shape:{}".format(v_fea_k.shape, tag_fea_k.shape, frame_proj_k.shape)) - - # replace the keys at ptr (dequeue and enqueue) - self.queue_v_cross_ng[:, ptr:ptr + batch_size] = v_fea_k.T - self.queue_tag_cross_ng[:, ptr:ptr + batch_size] = tag_fea_k.T - self.queue_title_cross_ng[:, ptr:ptr + batch_size] = title_fea_k.T - - self.queue_frame_proj_ng[:, ptr * frame_num:(ptr + batch_size) * frame_num] = frame_proj_k.T - self.queue_frame_cross_ng[:, ptr * frame_num:(ptr + batch_size) * frame_num] = frame_fea_k.T - # move pointer - ptr = (ptr + batch_size) % self.contrast_num_negative - - # if self.rank == 0: - # logger.info("end>>>>: ptr:{}".format(ptr)) - self.queue_ptr[0] = ptr - - def contrastive_loss(self, q, k, queue): - - q = q.squeeze() - q = F.normalize(q, dim=1) - k = k.squeeze() - k = F.normalize(k, dim=1) - - bs = q.size(0) - # logger.info("q.dtype:{},k.dtype:{}".format(q.dtype, k.dtype)) - # positive logits: Nx1 - # >>>>>>got error in apex:amp level=01!!!!!!!!! - # l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1) - l_pos = torch.matmul(q, k.T) - l_pos = torch.diag(l_pos).reshape([bs, -1]) - # negative logits: NxK - # l_neg = torch.einsum('nc,ck->nk', [q, queue.clone().detach()]) - l_neg = torch.matmul(q, queue.clone().detach()) - # logits: Nx(1+K) - logits = torch.cat([l_pos, l_neg], dim=1) - # if self.rank == 0: - # logger.info("logits.shape:{}".format(logits.shape)) - # apply temperature - logits /= self.contrast_temperature - - # labels: positive key indicators - labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda() - - return F.cross_entropy(logits, labels) - - def frame_self_loss(self, frame_fea, frame_fea_k, queue_frame_ng): - loss = 0. - - for i in range(frame_fea.size(1) - 1): - frame_loss = self.contrastive_loss(frame_fea[:, i, :], frame_fea_k[:, i+1, :], queue_frame_ng) \ - + self.contrastive_loss(frame_fea[:, i+1, :], frame_fea_k[:, i, :], queue_frame_ng) - loss += frame_loss - loss = loss / (frame_fea.size(1) - 1) - return loss - - def frame_cross_loss(self, frame_fea, frame_fea_k, queue_frame_ng, text_fea, text_fea_k, queue_text_ng): - loss = 0. 
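# --- Illustrative sketch, not part of the original file: contrastive_loss
# --- above is essentially a MoCo-style InfoNCE objective.  The positive logit
# --- is the dot product between each query and its momentum-encoded key, the
# --- negatives come from the memory queue of past keys, everything is divided
# --- by a temperature, and the cross-entropy target is always index 0 (the
# --- positive column).  A self-contained version with made-up sizes:
import torch
import torch.nn.functional as F

def info_nce_with_queue(q, k, queue, temperature=0.07):
    # q, k: [batch, dim] query/key features; queue: [dim, num_negatives] of past keys
    q = F.normalize(q, dim=1)
    k = F.normalize(k, dim=1)
    l_pos = (q * k).sum(dim=1, keepdim=True)           # [batch, 1] positive logits
    l_neg = q @ queue                                   # [batch, num_negatives] negative logits
    logits = torch.cat([l_pos, l_neg], dim=1) / temperature
    labels = torch.zeros(q.size(0), dtype=torch.long)   # the positive is always column 0
    return F.cross_entropy(logits, labels)

queue = F.normalize(torch.randn(512, 1024), dim=0)      # queue entries are kept L2-normalised
loss = info_nce_with_queue(torch.randn(8, 512), torch.randn(8, 512), queue)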
- for i in range(frame_fea.size(1)): - frame_loss = self.contrastive_loss(text_fea, frame_fea_k[:, i, :], queue_frame_ng) + \ - self.contrastive_loss(frame_fea[:, i, :], text_fea_k, queue_text_ng) - loss += frame_loss - loss = loss / frame_fea.size(1) - return loss - - def forward(self, video_data, video_frame, tag_ids, tag_mask, title_ids, title_mask, global_step): - tag_ids = tag_ids.view(-1, tag_ids.shape[-1]) - tag_mask = tag_mask.view(-1, tag_mask.shape[-1]) - title_ids = title_ids.view(-1, title_ids.shape[-1]) - title_mask = title_mask.view(-1, title_mask.shape[-1]) - # bs x frames x 3 x H x W - video = torch.as_tensor(video_data) - - if self.rank == 0 and global_step % self.task_config.n_display == 0: - logger.info("video1.shape:{}, dtype:{}, device:{}".format(video.shape, video.dtype, video.device)) - - if self.training: - # loss = 0.0 - v_fea, frame_fea = self.visual_encoder(video, video_frame) - if self.task_config.dataset == "bird": - tag_fea = self.text_encoder(tag_ids, tag_mask) - title_fea = self.text_encoder(title_ids, title_mask) - - # for video self supervised learning - # [bs,hidden_size] - bs, frame, hidden = frame_fea.shape - frame_fea = frame_fea.view(-1, hidden) - frame_proj = self.v_projector(frame_fea) - frame_pred = self.v_predictor(frame_proj) - frame_fea = frame_fea.view(bs, frame, hidden) - frame_proj = frame_proj.view(bs, frame, hidden) - frame_pred = frame_pred.view(bs, frame, hidden) - if self.rank == 0 and global_step % self.task_config.n_display == 0: - logger.info("v_fea.shape:{},device:{}".format(v_fea.shape, v_fea.device)) - logger.info("frame_fea.shape:{},device:{}".format(frame_fea.shape, frame_fea.device)) - logger.info("frame_proj.shape:{},device:{}".format(frame_proj.shape, frame_proj.device)) - logger.info("title_fea.shape:{}".format(title_fea.shape)) - logger.info("queue_v_cross_ng.shape:{}".format(self.queue_v_cross_ng.shape)) - # compute key features - with torch.no_grad(): # no gradient to keys - self._momentum_update() # update the key encoder - - tag_fea_k = self.text_encoder_k(tag_ids, tag_mask) - title_fea_k = self.text_encoder_k(title_ids, title_mask) - # - v_fea_k, frame_fea_k = self.visual_encoder_k(video, video_frame) - frame_fea_k = frame_fea_k.view(-1, hidden) - frame_proj_k = self.v_projector_k(frame_fea_k) - frame_fea_k = frame_fea_k.view(bs, frame, hidden) - frame_proj_k = frame_proj_k.view(bs, frame, hidden) - - # compute loss - if self.rank == 0 and global_step % self.task_config.n_display == 0: - logger.info( - "dtype: v_fea:{},v_fea_k:{},title_fea:{}".format(v_fea.dtype, v_fea_k.dtype, title_fea.dtype)) - # single video modality: video queue loss - loss_FAM = self.frame_self_loss(frame_pred, frame_proj_k, self.queue_frame_proj_ng) - # cross modality: cross queue loss - v_title_queue_loss = self.contrastive_loss(v_fea, title_fea_k, self.queue_title_cross_ng) \ - + self.contrastive_loss(title_fea, v_fea_k, self.queue_v_cross_ng) - if self.task_config.dataset == "bird": - v_tag_queue_loss = self.contrastive_loss(v_fea, tag_fea_k, self.queue_tag_cross_ng) \ - + self.contrastive_loss(tag_fea, v_fea_k, self.queue_v_cross_ng) - loss_VTM = (v_tag_queue_loss + v_title_queue_loss) / 2 - else: - loss_VTM = v_title_queue_loss - - loss_FTM = 0. 
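# --- Illustrative sketch, not part of the original file: the
# --- _momentum_update() called in forward() above keeps every key encoder as
# --- an exponential moving average of its query encoder,
# --- param_k <- m * param_k + (1 - m) * param, and the key encoders receive
# --- no gradients.  A minimal standalone version with toy modules:
import torch
import torch.nn as nn

@torch.no_grad()
def momentum_update(query_encoder: nn.Module, key_encoder: nn.Module, m: float = 0.99):
    for param, param_k in zip(query_encoder.parameters(), key_encoder.parameters()):
        param_k.data = param_k.data * m + param.data * (1.0 - m)

encoder = nn.Linear(512, 512)
encoder_k = nn.Linear(512, 512)
encoder_k.load_state_dict(encoder.state_dict())   # key encoder starts as an exact copy
for p in encoder_k.parameters():
    p.requires_grad = False                        # updated only through the EMA, never by gradients
momentum_update(encoder, encoder_k, m=0.99)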
- if self.task_config.use_frame_fea: - frame_title_loss = self.frame_cross_loss(frame_fea, frame_fea_k, self.queue_frame_cross_ng, title_fea, - title_fea_k, self.queue_title_cross_ng) - if self.task_config.dataset == "bird": - frame_tag_loss = self.frame_cross_loss(frame_fea, frame_fea_k, self.queue_frame_cross_ng, tag_fea, - tag_fea_k, self.queue_tag_cross_ng) - loss_FTM += (frame_tag_loss + frame_title_loss) / 2 - else: - loss_FTM = frame_title_loss - - # single text modality: text queue loss - # t_queue_loss = self.contrastive_loss(title_fea, tag_fea_k, self.queue_tag_cross_ng) \ - # + self.contrastive_loss(tag_fea, title_fea_k, self.queue_v_cross_ng) - - # dequeue_and_enqueue - self._dequeue_and_enqueue(v_fea_k, tag_fea_k, title_fea_k, frame_fea_k, frame_proj_k) - - # mlm loss - - mlm_title_loss = self.get_mlm_loss(title_ids, title_mask) - if self.task_config.dataset == "bird": - mlm_tag_loss = self.get_mlm_loss(tag_ids, tag_mask) - loss_MLM = (mlm_tag_loss + mlm_title_loss) / 2 - else: - loss_MLM = mlm_title_loss - - # total loss - loss = self.weight_FAM * loss_FAM + self.weight_VTM * loss_VTM + self.weight_FTM * loss_FTM + self.weight_MLM * loss_MLM - if self.rank == 0: - if global_step % self.task_config.n_display == 0: - logger.info("loss:{},loss_FAM:{},loss_VTM:{},loss_FTM:{},loss_MLM:{}" - "".format(loss, loss_FAM, loss_VTM, loss_FTM, loss_MLM)) - if self.task_config.logdir: - loss_item = {"loss": float(loss), "loss_FAM": float(loss_FAM), "loss_VTM": float(loss_VTM), - "loss_FTM": float(loss_FTM), "loss_MLM": float(loss_MLM)} - self.task_config.writer.add_scalars('loss', loss_item, global_step=global_step) - # self.task_config.writer.add_scalar('loss', video_cross_loss, global_step=global_step) - return loss - else: - return None - - -class BirdModel(BirdPreTrainedModel): - def __init__(self, cross_config, task_config): - super(BirdPreTrainedModel, self).__init__(cross_config) - self.task_config = task_config - self.rank = task_config.local_rank - # self.weight_sim = torch.nn.Parameter(torch.tensor([0.9], dtype=torch.float32), requires_grad=True) - self.weight_VTM_finetune = cross_config.weight_VTM_finetune - self.weight_FTM_finetune = cross_config.weight_FTM_finetune - self.top_frames = task_config.top_frames - ################## text Encoder - self.text_encoder = TextEncoder(self.task_config, cross_config) - ################## visual_encoder - self.visual_encoder = VisualEncoder(self.task_config, cross_config) - ################## loss function - self.loss_fct = CrossEn() - self.loss_fct_dual = Dual_CrossEn() - - def frame_loss(self, query_output, frame_output): - frame_num = frame_output.size(1) - loss = 0. 
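# --- Illustrative sketch, not part of the original file: the fine-tuning loss
# --- used by BirdModel combines the loose_similarity helper defined earlier
# --- (L2-normalised features scaled by a learned logit scale) with CrossEn
# --- applied to the similarity matrix and its transpose, which appears to be
# --- a CLIP-style symmetric retrieval objective with matched pairs on the
# --- diagonal.  A minimal standalone version (the fixed logit scale of 100 is
# --- an assumption for the example):
import torch
import torch.nn.functional as F

def symmetric_retrieval_loss(text_feat, video_feat, logit_scale=100.0):
    text_feat = F.normalize(text_feat, dim=-1)
    video_feat = F.normalize(video_feat, dim=-1)
    logits = logit_scale * text_feat @ video_feat.t()       # [batch, batch] similarity matrix
    targets = torch.arange(logits.size(0))                   # matched text/video pairs on the diagonal
    return F.cross_entropy(logits, targets) + F.cross_entropy(logits.t(), targets)

loss = symmetric_retrieval_loss(torch.randn(8, 512), torch.randn(8, 512))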
- for i in range(frame_num): - frame_single = frame_output[:, i, :].squeeze() - sim_matrix = self.loose_similarity(query_output, frame_single) - sim_loss = self.loss_fct(sim_matrix) + self.loss_fct(sim_matrix.T) - loss += sim_loss / frame_num - # logger.info("frame_output.shape:{},dtype:{}".format(frame_output.shape, frame_output.dtype)) - # logger.info("query_output.shape:{},dtype:{}".format(query_output.shape, frame_output.dtype)) - # sim_matrix = self.loose_similarity(query_output, frame_output) - # sim_matrix = torch.topk(sim_matrix, k=self.top_frames, dim=2)[0] - # sim_matrix = torch.mean(sim_matrix, dim=2) - # sim_loss = self.loss_fct(sim_matrix) + self.loss_fct(sim_matrix.T) - # loss += sim_loss - return loss - - def forward(self, query_ids, query_mask, video_data, video_frame, idx, global_step): - query_ids = query_ids.view(-1, query_ids.shape[-1]) - query_mask = query_mask.view(-1, query_mask.shape[-1]) - # T x 3 x H x W - video = torch.as_tensor(video_data) - # if self.rank == 0: - # logger.info("video.shape:{}, dtype:{}".format(video.shape, video.dtype)) - if self.training: - loss = 0.0 - query_output = self.text_encoder(query_ids, query_mask) - visual_output, frame_output = self.visual_encoder(video, video_frame) - # if self.rank == 0: - # logger.info("query_output.shape:{},dtype:{}".format(query_output.shape, query_output.dtype)) - # logger.info("visual_output.shape:{},dtype:{}".format(visual_output.shape, visual_output.dtype)) - # logger.info("frame_output.shape:{},dtype:{}".format(frame_output.shape, frame_output.dtype)) - - # frame loss - if self.task_config.use_frame_fea: - frame_loss = self.frame_loss(query_output, frame_output) - loss += self.weight_FTM_finetune * frame_loss - # video loss - sim_matrix = self.loose_similarity(query_output, visual_output) - sim_loss = self.loss_fct(sim_matrix) + self.loss_fct(sim_matrix.T) - loss += self.weight_VTM_finetune * sim_loss - # loss += sim_loss - - if self.task_config.local_rank == 0: - if global_step % self.task_config.n_display == 0: - logger.info( - "loss:{},frame_loss:{},sim_loss:{},type:{},sim_matrix.shape:{}".format(loss, loss - sim_loss, - sim_loss, sim_loss.dtype, sim_matrix.shape)) - - if self.task_config.logdir: - self.task_config.writer.add_scalar('loss', float(loss), global_step=global_step) - return loss - else: - return None - - -class MLP(nn.Module): - def __init__(self, in_dim=512, inner_dim=4096, out_dim=512, num_layers=2): - super(MLP, self).__init__() - - # hidden layers - linear_hidden = [nn.Identity()] - for i in range(num_layers - 1): - linear_hidden.append(nn.Linear(in_dim if i == 0 else inner_dim, inner_dim)) - linear_hidden.append(nn.BatchNorm1d(inner_dim)) - linear_hidden.append(nn.ReLU(inplace=True)) - self.linear_hidden = nn.Sequential(*linear_hidden) - - self.linear_out = nn.Linear(in_dim if num_layers == 1 else inner_dim, - out_dim) if num_layers >= 1 else nn.Identity() - - def forward(self, x): - x = self.linear_hidden(x) - x = self.linear_out(x) - - return x diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/trainer_seq2seq_qa.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/trainer_seq2seq_qa.py deleted file mode 100644 index 6abb41b33feb8c3abbe629ba483cfe5051875975..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/trainer_seq2seq_qa.py +++ /dev/null @@ -1,162 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The HuggingFace Team All rights 
reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -A subclass of `Trainer` specific to Question-Answering tasks -""" -import math -import time -from typing import Dict, List, Optional - -from torch.utils.data import Dataset - -from transformers import Seq2SeqTrainer, is_torch_tpu_available -from transformers.trainer_utils import PredictionOutput, speed_metrics - - -if is_torch_tpu_available(check_device=False): - import torch_xla.core.xla_model as xm - import torch_xla.debug.metrics as met - - -class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer): - def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs): - super().__init__(*args, **kwargs) - self.eval_examples = eval_examples - self.post_process_function = post_process_function - - # def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"): - def evaluate( - self, - eval_dataset: Optional[Dataset] = None, - eval_examples=None, - ignore_keys: Optional[List[str]] = None, - metric_key_prefix: str = "eval", - **gen_kwargs, - ) -> Dict[str, float]: - gen_kwargs = gen_kwargs.copy() - gen_kwargs["max_length"] = ( - gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length - ) - gen_kwargs["num_beams"] = ( - gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams - ) - self._gen_kwargs = gen_kwargs - - eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset - eval_dataloader = self.get_eval_dataloader(eval_dataset) - eval_examples = self.eval_examples if eval_examples is None else eval_examples - - # Temporarily disable metric computation, we will do it in the loop here. 
- compute_metrics = self.compute_metrics - self.compute_metrics = None - start_time = time.time() - eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop - try: - output = eval_loop( - eval_dataloader, - description="Evaluation", - # No point gathering the predictions if there are no metrics, otherwise we defer to - # self.args.prediction_loss_only - prediction_loss_only=True if compute_metrics is None else None, - ignore_keys=ignore_keys, - metric_key_prefix=metric_key_prefix, - ) - finally: - self.compute_metrics = compute_metrics - total_batch_size = self.args.eval_batch_size * self.args.world_size - if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: - start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] - output.metrics.update( - speed_metrics( - metric_key_prefix, - start_time, - num_samples=output.num_samples, - num_steps=math.ceil(output.num_samples / total_batch_size), - ) - ) - - if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: - # Only the main node write the results by default - eval_preds = self.post_process_function(eval_examples, eval_dataset, output) - metrics = self.compute_metrics(eval_preds) - - # Prefix all keys with metric_key_prefix + '_' - for key in list(metrics.keys()): - if not key.startswith(f"{metric_key_prefix}_"): - metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) - - metrics.update(output.metrics) - else: - metrics = output.metrics - - if self.args.should_log: - # Only the main node log the results by default - self.log(metrics) - - if self.args.tpu_metrics_debug or self.args.debug: - # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) - xm.master_print(met.metrics_report()) - - self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) - return metrics - - def predict( - self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs - ): - self._gen_kwargs = gen_kwargs.copy() - - predict_dataloader = self.get_test_dataloader(predict_dataset) - - # Temporarily disable metric computation, we will do it in the loop here. 
- compute_metrics = self.compute_metrics - self.compute_metrics = None - start_time = time.time() - eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop - try: - output = eval_loop( - predict_dataloader, - description="Prediction", - # No point gathering the predictions if there are no metrics, otherwise we defer to - # self.args.prediction_loss_only - prediction_loss_only=True if compute_metrics is None else None, - ignore_keys=ignore_keys, - metric_key_prefix=metric_key_prefix, - ) - finally: - self.compute_metrics = compute_metrics - - total_batch_size = self.args.eval_batch_size * self.args.world_size - if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: - start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] - output.metrics.update( - speed_metrics( - metric_key_prefix, - start_time, - num_samples=output.num_samples, - num_steps=math.ceil(output.num_samples / total_batch_size), - ) - ) - if self.post_process_function is None or self.compute_metrics is None: - return output - - predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict") - metrics = self.compute_metrics(predictions) - - # Prefix all keys with metric_key_prefix + '_' - for key in list(metrics.keys()): - if not key.startswith(f"{metric_key_prefix}_"): - metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) - metrics.update(output.metrics) - return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics) diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/token-classification/run_ner.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/token-classification/run_ner.py deleted file mode 100644 index 0b785d983866b112f045d7b58a0f4d6400a4f2a0..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/token-classification/run_ner.py +++ /dev/null @@ -1,634 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for token classification. -""" -# You can also adapt this script on your own token classification task and datasets. Pointers for this are left as -# comments. 
- -import logging -import os -import sys -from dataclasses import dataclass, field -from typing import Optional - -import datasets -import evaluate -import numpy as np -from datasets import ClassLabel, load_dataset - -import transformers -from transformers import ( - AutoConfig, - AutoModelForTokenClassification, - AutoTokenizer, - DataCollatorForTokenClassification, - HfArgumentParser, - PretrainedConfig, - PreTrainedTokenizerFast, - Trainer, - TrainingArguments, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version, send_example_telemetry -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.28.0") - -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") - -logger = logging.getLogger(__name__) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - ignore_mismatched_sizes: bool = field( - default=False, - metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a csv or JSON file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, - ) - test_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, - ) - text_column_name: Optional[str] = field( - default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} - ) - label_column_name: Optional[str] = field( - default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_seq_length: int = field( - default=None, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. If set, sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to model maximum sentence length. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " - "efficient on GPU but very bad for TPU." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - label_all_tokens: bool = field( - default=False, - metadata={ - "help": ( - "Whether to put the label for one word on all tokens of generated by that word or just on the " - "one (in which case the other tokens will have a padding index)." - ) - }, - ) - return_entity_level_metrics: bool = field( - default=False, - metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
- if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - self.task_name = self.task_name.lower() - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. - send_example_telemetry("run_ner", model_args, data_args) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - if training_args.should_log: - # The default of training_args.log_level is passive, so we set log level at info here to have that default. - transformers.utils.logging.set_verbosity_info() - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). 
- # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - if data_args.test_file is not None: - data_files["test"] = data_args.test_file - extension = data_args.train_file.split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - if training_args.do_train: - column_names = raw_datasets["train"].column_names - features = raw_datasets["train"].features - else: - column_names = raw_datasets["validation"].column_names - features = raw_datasets["validation"].features - - if data_args.text_column_name is not None: - text_column_name = data_args.text_column_name - elif "tokens" in column_names: - text_column_name = "tokens" - else: - text_column_name = column_names[0] - - if data_args.label_column_name is not None: - label_column_name = data_args.label_column_name - elif f"{data_args.task_name}_tags" in column_names: - label_column_name = f"{data_args.task_name}_tags" - else: - label_column_name = column_names[1] - - # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the - # unique labels. - def get_label_list(labels): - unique_labels = set() - for label in labels: - unique_labels = unique_labels | set(label) - label_list = list(unique_labels) - label_list.sort() - return label_list - - # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. - # Otherwise, we have to get the list of labels manually. - labels_are_int = isinstance(features[label_column_name].feature, ClassLabel) - if labels_are_int: - label_list = features[label_column_name].feature.names - label_to_id = {i: i for i in range(len(label_list))} - else: - label_list = get_label_list(raw_datasets["train"][label_column_name]) - label_to_id = {l: i for i, l in enumerate(label_list)} - - num_labels = len(label_list) - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
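# --- Illustrative sketch, not part of the original script: when the dataset's
# --- labels are plain strings rather than ClassLabel integers, get_label_list
# --- above builds a deterministic vocabulary by unioning the tags of every
# --- example and sorting them, and label_to_id then maps each tag to its
# --- index.  With made-up NER tags:
tag_sequences = [["O", "B-PER", "I-PER"], ["O", "B-LOC"]]
unique_labels = sorted({tag for seq in tag_sequences for tag in seq})
label_to_id_example = {label: i for i, label in enumerate(unique_labels)}
# unique_labels       -> ['B-LOC', 'B-PER', 'I-PER', 'O']
# label_to_id_example -> {'B-LOC': 0, 'B-PER': 1, 'I-PER': 2, 'O': 3}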
- config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - - tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path - if config.model_type in {"bloom", "gpt2", "roberta"}: - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=True, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - add_prefix_space=True, - ) - else: - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=True, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - - model = AutoModelForTokenClassification.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, - ) - - # Tokenizer check: this script requires a fast tokenizer. - if not isinstance(tokenizer, PreTrainedTokenizerFast): - raise ValueError( - "This example script only works for models that have a fast tokenizer. Checkout the big table of models at" - " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" - " this requirement" - ) - - # Model has labels -> use them. - if model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id: - if sorted(model.config.label2id.keys()) == sorted(label_list): - # Reorganize `label_list` to match the ordering of the model. - if labels_are_int: - label_to_id = {i: int(model.config.label2id[l]) for i, l in enumerate(label_list)} - label_list = [model.config.id2label[i] for i in range(num_labels)] - else: - label_list = [model.config.id2label[i] for i in range(num_labels)] - label_to_id = {l: i for i, l in enumerate(label_list)} - else: - logger.warning( - "Your model seems to have been trained with labels, but they don't match the dataset: ", - f"model labels: {sorted(model.config.label2id.keys())}, dataset labels:" - f" {sorted(label_list)}.\nIgnoring the model labels as a result.", - ) - - # Set the correspondences label/ID inside the model config - model.config.label2id = {l: i for i, l in enumerate(label_list)} - model.config.id2label = dict(enumerate(label_list)) - - # Map that sends B-Xxx label to its I-Xxx counterpart - b_to_i_label = [] - for idx, label in enumerate(label_list): - if label.startswith("B-") and label.replace("B-", "I-") in label_list: - b_to_i_label.append(label_list.index(label.replace("B-", "I-"))) - else: - b_to_i_label.append(idx) - - # Preprocessing the dataset - # Padding strategy - padding = "max_length" if data_args.pad_to_max_length else False - - # Tokenize all texts and align the labels with them. - def tokenize_and_align_labels(examples): - tokenized_inputs = tokenizer( - examples[text_column_name], - padding=padding, - truncation=True, - max_length=data_args.max_seq_length, - # We use this argument because the texts in our dataset are lists of words (with a label for each word). 
- is_split_into_words=True, - ) - labels = [] - for i, label in enumerate(examples[label_column_name]): - word_ids = tokenized_inputs.word_ids(batch_index=i) - previous_word_idx = None - label_ids = [] - for word_idx in word_ids: - # Special tokens have a word id that is None. We set the label to -100 so they are automatically - # ignored in the loss function. - if word_idx is None: - label_ids.append(-100) - # We set the label for the first token of each word. - elif word_idx != previous_word_idx: - label_ids.append(label_to_id[label[word_idx]]) - # For the other tokens in a word, we set the label to either the current label or -100, depending on - # the label_all_tokens flag. - else: - if data_args.label_all_tokens: - label_ids.append(b_to_i_label[label_to_id[label[word_idx]]]) - else: - label_ids.append(-100) - previous_word_idx = word_idx - - labels.append(label_ids) - tokenized_inputs["labels"] = labels - return tokenized_inputs - - if training_args.do_train: - if "train" not in raw_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = raw_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - with training_args.main_process_first(desc="train dataset map pre-processing"): - train_dataset = train_dataset.map( - tokenize_and_align_labels, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on train dataset", - ) - - if training_args.do_eval: - if "validation" not in raw_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = raw_datasets["validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - with training_args.main_process_first(desc="validation dataset map pre-processing"): - eval_dataset = eval_dataset.map( - tokenize_and_align_labels, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on validation dataset", - ) - - if training_args.do_predict: - if "test" not in raw_datasets: - raise ValueError("--do_predict requires a test dataset") - predict_dataset = raw_datasets["test"] - if data_args.max_predict_samples is not None: - max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) - predict_dataset = predict_dataset.select(range(max_predict_samples)) - with training_args.main_process_first(desc="prediction dataset map pre-processing"): - predict_dataset = predict_dataset.map( - tokenize_and_align_labels, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on prediction dataset", - ) - - # Data collator - data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) - - # Metrics - metric = evaluate.load("seqeval") - - def compute_metrics(p): - predictions, labels = p - predictions = np.argmax(predictions, axis=2) - - # Remove ignored index (special tokens) - true_predictions = [ - [label_list[p] for (p, l) in zip(prediction, label) if l != -100] - for prediction, label in zip(predictions, labels) - ] - true_labels = [ - [label_list[l] for (p, l) in zip(prediction, label) if l != -100] - 
for prediction, label in zip(predictions, labels) - ] - - results = metric.compute(predictions=true_predictions, references=true_labels) - if data_args.return_entity_level_metrics: - # Unpack nested dictionaries - final_results = {} - for key, value in results.items(): - if isinstance(value, dict): - for n, v in value.items(): - final_results[f"{key}_{n}"] = v - else: - final_results[key] = value - return final_results - else: - return { - "precision": results["overall_precision"], - "recall": results["overall_recall"], - "f1": results["overall_f1"], - "accuracy": results["overall_accuracy"], - } - - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - data_collator=data_collator, - compute_metrics=compute_metrics, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - metrics = train_result.metrics - trainer.save_model() # Saves the tokenizer too for easy upload - - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate() - - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Predict - if training_args.do_predict: - logger.info("*** Predict ***") - - predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict") - predictions = np.argmax(predictions, axis=2) - - # Remove ignored index (special tokens) - true_predictions = [ - [label_list[p] for (p, l) in zip(prediction, label) if l != -100] - for prediction, label in zip(predictions, labels) - ] - - trainer.log_metrics("predict", metrics) - trainer.save_metrics("predict", metrics) - - # Save predictions - output_predictions_file = os.path.join(training_args.output_dir, "predictions.txt") - if trainer.is_world_process_zero(): - with open(output_predictions_file, "w") as writer: - for prediction in true_predictions: - writer.write(" ".join(prediction) + "\n") - - kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} - if data_args.dataset_name is not None: - kwargs["dataset_tags"] = data_args.dataset_name - if data_args.dataset_config_name is not None: - kwargs["dataset_args"] = data_args.dataset_config_name - kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" - else: - kwargs["dataset"] = data_args.dataset_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/spaces/chenyangqi/FateZero/FateZero/video_diffusion/common/image_util.py 
b/spaces/chenyangqi/FateZero/FateZero/video_diffusion/common/image_util.py deleted file mode 100644 index f5258a4c7d49ca266eb73287c09aa7ee18fa9421..0000000000000000000000000000000000000000 --- a/spaces/chenyangqi/FateZero/FateZero/video_diffusion/common/image_util.py +++ /dev/null @@ -1,203 +0,0 @@ -import os -import math -import textwrap - -import imageio -import numpy as np -from typing import Sequence -import requests -import cv2 -from PIL import Image, ImageDraw, ImageFont - -import torch -from torchvision import transforms -from einops import rearrange - - - - - - -IMAGE_EXTENSION = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp") - -FONT_URL = "https://raw.github.com/googlefonts/opensans/main/fonts/ttf/OpenSans-Regular.ttf" -FONT_PATH = "./docs/OpenSans-Regular.ttf" - - -def pad(image: Image.Image, top=0, right=0, bottom=0, left=0, color=(255, 255, 255)) -> Image.Image: - new_image = Image.new(image.mode, (image.width + right + left, image.height + top + bottom), color) - new_image.paste(image, (left, top)) - return new_image - - -def download_font_opensans(path=FONT_PATH): - font_url = FONT_URL - response = requests.get(font_url) - os.makedirs(os.path.dirname(path), exist_ok=True) - with open(path, "wb") as f: - f.write(response.content) - - -def annotate_image_with_font(image: Image.Image, text: str, font: ImageFont.FreeTypeFont) -> Image.Image: - image_w = image.width - _, _, text_w, text_h = font.getbbox(text) - line_size = math.floor(len(text) * image_w / text_w) - - lines = textwrap.wrap(text, width=line_size) - padding = text_h * len(lines) - image = pad(image, top=padding + 3) - - ImageDraw.Draw(image).text((0, 0), "\n".join(lines), fill=(0, 0, 0), font=font) - return image - - -def annotate_image(image: Image.Image, text: str, font_size: int = 15): - if not os.path.isfile(FONT_PATH): - download_font_opensans() - font = ImageFont.truetype(FONT_PATH, size=font_size) - return annotate_image_with_font(image=image, text=text, font=font) - - -def make_grid(images: Sequence[Image.Image], rows=None, cols=None) -> Image.Image: - if isinstance(images[0], np.ndarray): - images = [Image.fromarray(i) for i in images] - - if rows is None: - assert cols is not None - rows = math.ceil(len(images) / cols) - else: - cols = math.ceil(len(images) / rows) - - w, h = images[0].size - grid = Image.new("RGB", size=(cols * w, rows * h)) - for i, image in enumerate(images): - if image.size != (w, h): - image = image.resize((w, h)) - grid.paste(image, box=(i % cols * w, i // cols * h)) - return grid - - -def save_images_as_gif( - images: Sequence[Image.Image], - save_path: str, - loop=0, - duration=100, - optimize=False, -) -> None: - - images[0].save( - save_path, - save_all=True, - append_images=images[1:], - optimize=optimize, - loop=loop, - duration=duration, - ) - -def save_images_as_mp4( - images: Sequence[Image.Image], - save_path: str, -) -> None: - # images[0].save( - # save_path, - # save_all=True, - # append_images=images[1:], - # optimize=optimize, - # loop=loop, - # duration=duration, - # ) - writer_edit = imageio.get_writer( - save_path, - fps=10) - for i in images: - init_image = i.convert("RGB") - writer_edit.append_data(np.array(init_image)) - writer_edit.close() - - - -def save_images_as_folder( - images: Sequence[Image.Image], - save_path: str, -) -> None: - os.makedirs(save_path, exist_ok=True) - for index, image in enumerate(images): - init_image = image - if len(np.array(init_image).shape) == 3: - cv2.imwrite(os.path.join(save_path, f"{index:05d}.png"), 
np.array(init_image)[:, :, ::-1]) - else: - cv2.imwrite(os.path.join(save_path, f"{index:05d}.png"), np.array(init_image)) - -def log_train_samples( - train_dataloader, - save_path, - num_batch: int = 4, -): - train_samples = [] - for idx, batch in enumerate(train_dataloader): - if idx >= num_batch: - break - train_samples.append(batch["images"]) - - train_samples = torch.cat(train_samples).numpy() - train_samples = rearrange(train_samples, "b c f h w -> b f h w c") - train_samples = (train_samples * 0.5 + 0.5).clip(0, 1) - train_samples = numpy_batch_seq_to_pil(train_samples) - train_samples = [make_grid(images, cols=int(np.ceil(np.sqrt(len(train_samples))))) for images in zip(*train_samples)] - # save_images_as_gif(train_samples, save_path) - save_gif_mp4_folder_type(train_samples, save_path) - -def log_train_reg_samples( - train_dataloader, - save_path, - num_batch: int = 4, -): - train_samples = [] - for idx, batch in enumerate(train_dataloader): - if idx >= num_batch: - break - train_samples.append(batch["class_images"]) - - train_samples = torch.cat(train_samples).numpy() - train_samples = rearrange(train_samples, "b c f h w -> b f h w c") - train_samples = (train_samples * 0.5 + 0.5).clip(0, 1) - train_samples = numpy_batch_seq_to_pil(train_samples) - train_samples = [make_grid(images, cols=int(np.ceil(np.sqrt(len(train_samples))))) for images in zip(*train_samples)] - # save_images_as_gif(train_samples, save_path) - save_gif_mp4_folder_type(train_samples, save_path) - - -def save_gif_mp4_folder_type(images, save_path, save_gif=False): - - if isinstance(images[0], np.ndarray): - images = [Image.fromarray(i) for i in images] - elif isinstance(images[0], torch.Tensor): - images = [transforms.ToPILImage()(i.cpu().clone()[0]) for i in images] - save_path_mp4 = save_path.replace('gif', 'mp4') - save_path_folder = save_path.replace('.gif', '') - if save_gif: save_images_as_gif(images, save_path) - save_images_as_mp4(images, save_path_mp4) - save_images_as_folder(images, save_path_folder) - -# copy from video_diffusion/pipelines/stable_diffusion.py -def numpy_seq_to_pil(images): - """ - Convert a numpy image or a batch of images to a PIL image. - """ - if images.ndim == 3: - images = images[None, ...] 
- images = (images * 255).round().astype("uint8") - if images.shape[-1] == 1: - # special case for grayscale (single channel) images - pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] - else: - pil_images = [Image.fromarray(image) for image in images] - - return pil_images - -# copy from diffusers-0.11.1/src/diffusers/pipeline_utils.py -def numpy_batch_seq_to_pil(images): - pil_images = [] - for sequence in images: - pil_images.append(numpy_seq_to_pil(sequence)) - return pil_images diff --git a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/japanese_bert.py b/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/japanese_bert.py deleted file mode 100644 index 5f82016611d2c12a0bbdccc5a3bbefedd68ced4a..0000000000000000000000000000000000000000 --- a/spaces/chikoto/Umamusume-DeBERTa-VITS2-TTS-JP/text/japanese_bert.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM -import sys -from text.japanese import text2sep_kata - -tokenizer = AutoTokenizer.from_pretrained("ku-nlp/deberta-v2-base-japanese") -# tokenizer = AutoTokenizer.from_pretrained("ku-nlp/deberta-v2-base-japanese-with-auto-jumanpp") - -models = dict() - - -def get_bert_feature(text, word2ph, device=None): - sep_text, _ = text2sep_kata(text) - sep_tokens = [tokenizer.tokenize(t) for t in sep_text] - sep_ids = [tokenizer.convert_tokens_to_ids(t) for t in sep_tokens] - sep_ids = [2] + [item for sublist in sep_ids for item in sublist] + [3] - return get_bert_feature_with_token(sep_ids, word2ph, device) - - -def get_bert_feature_with_token(tokens, word2ph, device=None): - if ( - sys.platform == "darwin" - and torch.backends.mps.is_available() - and device == "cpu" - ): - device = "mps" - if not device: - device = "cuda" - if device not in models.keys(): - models[device] = AutoModelForMaskedLM.from_pretrained( - "ku-nlp/deberta-v2-base-japanese" - # "ku-nlp/deberta-v2-base-japanese-with-auto-jumanpp" - ).to(device) - with torch.no_grad(): - inputs = torch.tensor(tokens).to(device).unsqueeze(0) - token_type_ids = torch.zeros_like(inputs).to(device) - attention_mask = torch.ones_like(inputs).to(device) - inputs = { - "input_ids": inputs, - "token_type_ids": token_type_ids, - "attention_mask": attention_mask, - } - - # for i in inputs: - # inputs[i] = inputs[i].to(device) - res = models[device](**inputs, output_hidden_states=True) - res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu() - assert inputs["input_ids"].shape[-1] == len(word2ph) - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - return phone_level_feature.T \ No newline at end of file diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/enum/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/enum/__init__.py deleted file mode 100644 index dd49faafd91cf427c527beaf942ef2230c4636d3..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/enum/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# encoding: utf-8 - -""" -Enumerations used in python-docx -""" - -from __future__ import absolute_import, print_function, unicode_literals - - -class Enumeration(object): - - @classmethod - def from_xml(cls, xml_val): - return cls._xml_to_idx[xml_val] - - @classmethod - def 
to_xml(cls, enum_val): - return cls._idx_to_xml[enum_val] diff --git a/spaces/cihyFjudo/fairness-paper-search/Attar Singh Acupressure.pdf.md b/spaces/cihyFjudo/fairness-paper-search/Attar Singh Acupressure.pdf.md deleted file mode 100644 index 3f375f7a74f08255524442ac6fc0afecc5d0349f..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Attar Singh Acupressure.pdf.md +++ /dev/null @@ -1,5 +0,0 @@ - -

Our firm, M/s. B. Chattar Singh Jiwan Singh, is one of the oldest firms publishing books on the Sikh religion. It was established in 1880 A.D. for the propagation of the Sikh religion. Keeping this aim in view, we started composing the handwritten Guru Granth Sahib Ji to be manifested in Gurdwaras.

-

Attar Singh Acupressure.pdf


DOWNLOAD: https://tinurli.com/2uwjfr



-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Rapoo Mouse Driver For Mac Get the Best Performance and Battery Life with Rapoo Mice.md b/spaces/cihyFjudo/fairness-paper-search/Rapoo Mouse Driver For Mac Get the Best Performance and Battery Life with Rapoo Mice.md deleted file mode 100644 index 523a86e8edeb3c123daa51d95f6406b37bf5565e..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Rapoo Mouse Driver For Mac Get the Best Performance and Battery Life with Rapoo Mice.md +++ /dev/null @@ -1,16 +0,0 @@ -
-

Below we have compiled a list of the most downloaded TV-shows worldwide (single episode) for 2015, together with the traditional ratings in the US. The download numbers are estimated by TorrentFreak based on several sources, including statistics reported by public BitTorrent trackers.

-

It is not really a spoiler to say that this is a gender movie, although not a typical one, because it is dressed up as a lighthearted comedy, which it truly is by the way. Twenty-five minutes into the movie, however, the only main topic being discussed is whether one is gay or not. It has a clever disguise, calling itself a "Dirty Weekend" with a gorgeous woman as the lead character, stranded at an airport with an older man (Matthew Broderick). But after 25 minutes it turns out that the woman is lesbian and Matthew Broderick isn't sure what he is, but he wants to find out anyway...

Even if the story about gender can't arouse much interest, you will be pleased to hear that "Dirty Weekend" can still be enjoyed as a regular, laid-back comedy, because Matthew Broderick is his usual stumbling, funny character. And even if the girl is lesbian and there is no romance between her and Matthew Broderick, she is still a knockout and has some pretty sharp comments about Broderick's character that push the story into new directions which do surprise.

As far as gender movies are concerned, it is nice to see one that is suited to be seen by everybody. No sex at all. In a Walt Disney movie you would see more nudity. Charming, very slow-paced, light comedy with some surprising twists. Not great, not bad, just really enjoyable.

-

This Weekend 2015 movie kickass download


Download Zip > https://tinurli.com/2uwjsG



-


-

'Sicario' had me stoked from the start. It is hard to go wrong with the involvement of cinematographer Roger Deakins, director Denis Villeneuve, writer Taylor Sheridan, advertising that really makes one want to see the film, its critical acclaim and a cast like Emily Blunt, Josh Brolin and Benicio Del Toro.

In no way was 'Sicario' a letdown. It completely lived up to the hype, and all of the attractions/reasons for seeing the film were among the things that made 'Sicario' so good. 'Sicario' to me really is one of the best films of 2015, one of the best crime thrillers in years and a standout film of the decade. Its one stumbling block was that more could have been done with the character of Kate: her character and motivations seemed a little underwritten and not always as decisive or plausible as they ought to be. However, it was nowhere near as big a problem as made out (and was completely masked by how brilliant the rest of the film was), because it was clear that she was meant to be something of a fish-out-of-water character, and that was brought out very well, meaning that to me some of her actions made sense.

Everything about the film in general is brilliant and a tour de force of film-making, though it is understandable if it doesn't connect with everyone. 'Sicario' is impeccably made for starters: the setting is both audacious and visceral in showing the horrors and brutality of the world it depicts, and the editing is tight, stylish and enhances the brutal atmosphere. Best of all is the cinematography (in close competition in that year's Oscar category with 'The Revenant', which was a deserved winner), with Roger Deakins demonstrating once again why he is one of the best contemporary cinematographers in the business, with cinematography that's stunning and darkly gritty, also bringing out the visceral horror.

Denis Villeneuve's directing has a beautiful darkness but also a hard edge that is perfect for the story 'Sicario' tries to tell. The music score is haunting and pulsating; one can actually feel their heart beat with tension and anticipation. I can totally see why it was nominated for the Oscar, an easy second to Ennio Morricone's work for 'The Hateful Eight'. I can also see the acclaim for the sound editing, which added a huge amount to the film's authenticity, again up against tight competition in the category that year at the Oscars.

The script is tightly structured and rich in complexity, while the story doesn't need attention-grabbing set pieces to impress or make its point and has more than that on its mind. And all the better for it. It is heavily reliant on atmosphere, this is brutal, harrowing stuff that effectively shows the horrors of the situation and setting without overdoing or sugar-coating it, also showing respect to it. It's wonderfully murky and dark, while also taut and kinetic in its energy (despite the sometimes deliberate, but never dull, pacing).

You couldn't have gotten better performances. The standout is Benicio Del Toro, who has the most interesting character (especially in the genuinely shocking final act) and gives a magnificently shady performance that's chilling but also conflicted. Emily Blunt conveys great strength and touching vulnerability, while Josh Brolin is tongue-in-cheek and charming with a touch of ambiguity. Daniel Kaluuya is something of a moral-compass character, a quality that Kaluuya brings out in a movingly sympathetic way.

Overall, a masterpiece and a tour de force, with only one minor and ignorable stumbling block. 10/10 Bethany Cox

-

In June 2014, Chloë Grace Moretz echoed her co-stars' sentiments when asked about Kick-Ass 3, stating that "I hope, I wish. That'd be fun. That'd be great. I doubt it but I would love it". She also cited the second film's lower box office gross as the key obstacle to the third chapter being produced and suggested file sharing was a factor: "The hard thing is if fans want a third movie, they've got to go buy the ticket to go see the movie. It was like the second most pirated movie of the year, so if you want a movie to be made into a second, a third, a fourth and a fifth, go buy a ticket. Don't pirate it."[59] In August 2014, Moretz reiterated her previous statements and said "sadly, I think I'm done with [Hit-Girl]".[60] In February 2015, Matthew Vaughn spoke optimistically about a "Hit-Girl" prequel. He stated "If that happens, I'm pretty sure I can persuade Aaron and Chloe to come back and finish the story of Kick-Ass."[61][62] On 17 June 2015, Vaughn stated in an answer to Yahoo that he is working on a prequel on how Hit-Girl and Big Daddy became superheroes and plans to make Kick-Ass 3 after.[63]

-

Hi
Thanks for this; I just downloaded it and attached it to a SQL Server 2017 instance.
I see that the non-clustered indexes have been removed, but does that also go for the foreign key constraints? Or are there none of those in the prod DB?
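One way to check is to query SQL Server's catalog views on the attached copy. The snippet below is only a minimal sketch, assuming the `pyodbc` package, ODBC Driver 17 for SQL Server, and placeholder server/database names that you would replace with your own; it simply counts foreign keys and non-clustered indexes so you can see what survived the export.

```python
# Minimal sketch (assumptions: pyodbc installed, ODBC Driver 17, placeholder
# server/database names): count foreign keys and non-clustered indexes in
# the attached database.
import pyodbc

conn = pyodbc.connect(
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "SERVER=localhost;DATABASE=YourAttachedDb;Trusted_Connection=yes;"
)
cur = conn.cursor()

# sys.foreign_keys lists every foreign key constraint in the database.
cur.execute("SELECT COUNT(*) FROM sys.foreign_keys;")
print("foreign keys:", cur.fetchone()[0])

# sys.indexes records all indexes; filter to the non-clustered ones.
cur.execute("SELECT COUNT(*) FROM sys.indexes WHERE type_desc = 'NONCLUSTERED';")
print("non-clustered indexes:", cur.fetchone()[0])

conn.close()
```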

-

Thank you for making this available. Once my copy finishes downloading I plan to expand it and then repackage it using a form of compression that interests me, called deduplication. If successful, I will share the results with you. Instead of distributing large zip archives that still need to be extracted, the deduplication feature that is part of Windows Server 2019 works really well on certain datasets. In the end you are left with a compact, deduplicated and compressed VHD file backed by NTFS internally. No extraction required; just mount it on Windows Server 2019 and use it.
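Purely as a hypothetical illustration of that workflow, a minimal Python sketch might look like the following. It assumes Windows Server 2019 with the Hyper-V (`Mount-VHD`) and Data Deduplication (`Get-DedupStatus`) PowerShell modules installed, and the VHD path is a made-up placeholder; it mounts the VHD and prints the volume's deduplication statistics by shelling out to PowerShell.

```python
# Hypothetical sketch: mount a deduplicated VHD on Windows Server 2019 and
# report dedup savings. Assumes the Hyper-V and Deduplication PowerShell
# modules are available; the path below is a placeholder.
import subprocess

VHD_PATH = r"D:\data\dedup-database.vhdx"  # placeholder path

def powershell(command: str) -> str:
    """Run a PowerShell command and return its standard output."""
    result = subprocess.run(
        ["powershell.exe", "-NoProfile", "-Command", command],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.strip()

# Attach the VHD; Windows assigns drive letters to the volumes inside it.
powershell(f'Mount-VHD -Path "{VHD_PATH}"')

# Print per-volume deduplication statistics (saved space, optimized files, ...).
print(powershell("Get-DedupStatus | Format-List"))
```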

-

However, a group of dedicated contributors to the site founded the Katcr.co forum shortly after to restore the popular torrent download site to its former glory. Many are wondering if this is the end of Kickass Torrents or if the site will somehow rise from the ashes as it has in the past.

-

The new Kickass Torrents website once again gives users access to a wide range of torrent downloads, including the latest movies, TV, music and software. As a leader in online piracy, Kickass Torrents has earned a reputation for offering high-quality torrents and a user-friendly interface.

-

This can be a fast and efficient way to download large files such as movies or TV, but it also has its drawbacks. Since torrenting is based on file sharing, it is often associated with pirated content, which is illegal in many countries.

-

-

However, it is worth noting that several Australian ISPs have blocked access to five popular torrent download websites, including Pirate Bay and Isohunt, due to copyright concerns. This action was taken after the original KickassTorrent website was shut down earlier this year, which led to several mirror sites claiming to be the legitimate continuation of the original website.

-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/[SEO Title Tags Best Practices and Examples](httpsmoz.comlearnseotitle-tag).md b/spaces/cihyFjudo/fairness-paper-search/[SEO Title Tags Best Practices and Examples](httpsmoz.comlearnseotitle-tag).md deleted file mode 100644 index 14adf5c5f96ee11988db6722775e3f33e5a2f968..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/[SEO Title Tags Best Practices and Examples](httpsmoz.comlearnseotitle-tag).md +++ /dev/null @@ -1,6 +0,0 @@ -

Kabhi Alvida Naa Kehna Songs Hd 1080134 installation nuetzli


Download 🆗 https://tinurli.com/2uwkNx



-
-
-
-

diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageMath.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageMath.py deleted file mode 100644 index ac7d36b698c2ec9839d8a771734c9f730f701534..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageMath.py +++ /dev/null @@ -1,263 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# a simple math add-on for the Python Imaging Library -# -# History: -# 1999-02-15 fl Original PIL Plus release -# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6 -# 2005-09-12 fl Fixed int() and float() for Python 2.4.1 -# -# Copyright (c) 1999-2005 by Secret Labs AB -# Copyright (c) 2005 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import builtins - -from . import Image, _imagingmath - - -def _isconstant(v): - return isinstance(v, (int, float)) - - -class _Operand: - """Wraps an image operand, providing standard operators""" - - def __init__(self, im): - self.im = im - - def __fixup(self, im1): - # convert image to suitable mode - if isinstance(im1, _Operand): - # argument was an image. - if im1.im.mode in ("1", "L"): - return im1.im.convert("I") - elif im1.im.mode in ("I", "F"): - return im1.im - else: - msg = f"unsupported mode: {im1.im.mode}" - raise ValueError(msg) - else: - # argument was a constant - if _isconstant(im1) and self.im.mode in ("1", "L", "I"): - return Image.new("I", self.im.size, im1) - else: - return Image.new("F", self.im.size, im1) - - def apply(self, op, im1, im2=None, mode=None): - im1 = self.__fixup(im1) - if im2 is None: - # unary operation - out = Image.new(mode or im1.mode, im1.size, None) - im1.load() - try: - op = getattr(_imagingmath, op + "_" + im1.mode) - except AttributeError as e: - msg = f"bad operand type for '{op}'" - raise TypeError(msg) from e - _imagingmath.unop(op, out.im.id, im1.im.id) - else: - # binary operation - im2 = self.__fixup(im2) - if im1.mode != im2.mode: - # convert both arguments to floating point - if im1.mode != "F": - im1 = im1.convert("F") - if im2.mode != "F": - im2 = im2.convert("F") - if im1.size != im2.size: - # crop both arguments to a common size - size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1])) - if im1.size != size: - im1 = im1.crop((0, 0) + size) - if im2.size != size: - im2 = im2.crop((0, 0) + size) - out = Image.new(mode or im1.mode, im1.size, None) - im1.load() - im2.load() - try: - op = getattr(_imagingmath, op + "_" + im1.mode) - except AttributeError as e: - msg = f"bad operand type for '{op}'" - raise TypeError(msg) from e - _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id) - return _Operand(out) - - # unary operators - def __bool__(self): - # an image is "true" if it contains at least one non-zero pixel - return self.im.getbbox() is not None - - def __abs__(self): - return self.apply("abs", self) - - def __pos__(self): - return self - - def __neg__(self): - return self.apply("neg", self) - - # binary operators - def __add__(self, other): - return self.apply("add", self, other) - - def __radd__(self, other): - return self.apply("add", other, self) - - def __sub__(self, other): - return self.apply("sub", self, other) - - def __rsub__(self, other): - return self.apply("sub", other, self) - - def __mul__(self, other): - return self.apply("mul", self, other) - - def __rmul__(self, other): - return self.apply("mul", other, self) - - def __truediv__(self, other): - return 
self.apply("div", self, other) - - def __rtruediv__(self, other): - return self.apply("div", other, self) - - def __mod__(self, other): - return self.apply("mod", self, other) - - def __rmod__(self, other): - return self.apply("mod", other, self) - - def __pow__(self, other): - return self.apply("pow", self, other) - - def __rpow__(self, other): - return self.apply("pow", other, self) - - # bitwise - def __invert__(self): - return self.apply("invert", self) - - def __and__(self, other): - return self.apply("and", self, other) - - def __rand__(self, other): - return self.apply("and", other, self) - - def __or__(self, other): - return self.apply("or", self, other) - - def __ror__(self, other): - return self.apply("or", other, self) - - def __xor__(self, other): - return self.apply("xor", self, other) - - def __rxor__(self, other): - return self.apply("xor", other, self) - - def __lshift__(self, other): - return self.apply("lshift", self, other) - - def __rshift__(self, other): - return self.apply("rshift", self, other) - - # logical - def __eq__(self, other): - return self.apply("eq", self, other) - - def __ne__(self, other): - return self.apply("ne", self, other) - - def __lt__(self, other): - return self.apply("lt", self, other) - - def __le__(self, other): - return self.apply("le", self, other) - - def __gt__(self, other): - return self.apply("gt", self, other) - - def __ge__(self, other): - return self.apply("ge", self, other) - - -# conversions -def imagemath_int(self): - return _Operand(self.im.convert("I")) - - -def imagemath_float(self): - return _Operand(self.im.convert("F")) - - -# logical -def imagemath_equal(self, other): - return self.apply("eq", self, other, mode="I") - - -def imagemath_notequal(self, other): - return self.apply("ne", self, other, mode="I") - - -def imagemath_min(self, other): - return self.apply("min", self, other) - - -def imagemath_max(self, other): - return self.apply("max", self, other) - - -def imagemath_convert(self, mode): - return _Operand(self.im.convert(mode)) - - -ops = {} -for k, v in list(globals().items()): - if k[:10] == "imagemath_": - ops[k[10:]] = v - - -def eval(expression, _dict={}, **kw): - """ - Evaluates an image expression. - - :param expression: A string containing a Python-style expression. - :param options: Values to add to the evaluation context. You - can either use a dictionary, or one or more keyword - arguments. - :return: The evaluated expression. This is usually an image object, but can - also be an integer, a floating point value, or a pixel tuple, - depending on the expression. 
- """ - - # build execution namespace - args = ops.copy() - args.update(_dict) - args.update(kw) - for k, v in list(args.items()): - if hasattr(v, "im"): - args[k] = _Operand(v) - - compiled_code = compile(expression, "", "eval") - - def scan(code): - for const in code.co_consts: - if type(const) == type(compiled_code): - scan(const) - - for name in code.co_names: - if name not in args and name != "abs": - msg = f"'{name}' not allowed" - raise ValueError(msg) - - scan(compiled_code) - out = builtins.eval(expression, {"__builtins": {"abs": abs}}, args) - try: - return out.im - except AttributeError: - return out diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiosignal/__init__.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiosignal/__init__.py deleted file mode 100644 index 3d288e6ede67df2bb8e5660e30372e190eb23e65..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiosignal/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -from frozenlist import FrozenList - -__version__ = "1.3.1" - -__all__ = ("Signal",) - - -class Signal(FrozenList): - """Coroutine-based signal implementation. - - To connect a callback to a signal, use any list method. - - Signals are fired using the send() coroutine, which takes named - arguments. - """ - - __slots__ = ("_owner",) - - def __init__(self, owner): - super().__init__() - self._owner = owner - - def __repr__(self): - return "".format( - self._owner, self.frozen, list(self) - ) - - async def send(self, *args, **kwargs): - """ - Sends data to all registered receivers. - """ - if not self.frozen: - raise RuntimeError("Cannot send non-frozen signal.") - - for receiver in self: - await receiver(*args, **kwargs) # type: ignore diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/globals.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/globals.py deleted file mode 100644 index 480058f10dd6a8205d1bff0b94de7ae347a7629a..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/globals.py +++ /dev/null @@ -1,68 +0,0 @@ -import typing as t -from threading import local - -if t.TYPE_CHECKING: - import typing_extensions as te - from .core import Context - -_local = local() - - -@t.overload -def get_current_context(silent: "te.Literal[False]" = False) -> "Context": - ... - - -@t.overload -def get_current_context(silent: bool = ...) -> t.Optional["Context"]: - ... - - -def get_current_context(silent: bool = False) -> t.Optional["Context"]: - """Returns the current click context. This can be used as a way to - access the current context object from anywhere. This is a more implicit - alternative to the :func:`pass_context` decorator. This function is - primarily useful for helpers such as :func:`echo` which might be - interested in changing its behavior based on the current context. - - To push the current context, :meth:`Context.scope` can be used. - - .. versionadded:: 5.0 - - :param silent: if set to `True` the return value is `None` if no context - is available. The default behavior is to raise a - :exc:`RuntimeError`. 
- """ - try: - return t.cast("Context", _local.stack[-1]) - except (AttributeError, IndexError) as e: - if not silent: - raise RuntimeError("There is no active click context.") from e - - return None - - -def push_context(ctx: "Context") -> None: - """Pushes a new context to the current stack.""" - _local.__dict__.setdefault("stack", []).append(ctx) - - -def pop_context() -> None: - """Removes the top level from the stack.""" - _local.stack.pop() - - -def resolve_color_default(color: t.Optional[bool] = None) -> t.Optional[bool]: - """Internal helper to get the default value of the color flag. If a - value is passed it's returned unchanged, otherwise it's looked up from - the current context. - """ - if color is not None: - return color - - ctx = get_current_context(silent=True) - - if ctx is not None: - return ctx.color - - return None diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/explicitClosingLinePen.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/explicitClosingLinePen.py deleted file mode 100644 index e3c9c943cc504e970d4e9ec9f96c3817d8383ccf..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/explicitClosingLinePen.py +++ /dev/null @@ -1,101 +0,0 @@ -from fontTools.pens.filterPen import ContourFilterPen - - -class ExplicitClosingLinePen(ContourFilterPen): - """A filter pen that adds an explicit lineTo to the first point of each closed - contour if the end point of the last segment is not already the same as the first point. - Otherwise, it passes the contour through unchanged. - - >>> from pprint import pprint - >>> from fontTools.pens.recordingPen import RecordingPen - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((100, 0)) - >>> pen.lineTo((100, 100)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('lineTo', ((100, 0),)), - ('lineTo', ((100, 100),)), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((100, 0)) - >>> pen.lineTo((100, 100)) - >>> pen.lineTo((0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('lineTo', ((100, 0),)), - ('lineTo', ((100, 100),)), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.curveTo((100, 0), (0, 100), (100, 100)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('curveTo', ((100, 0), (0, 100), (100, 100))), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.curveTo((100, 0), (0, 100), (100, 100)) - >>> pen.lineTo((0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('curveTo', ((100, 0), (0, 100), (100, 100))), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.curveTo((100, 0), (0, 100), (0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('curveTo', ((100, 0), (0, 100), (0, 0))), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), ('closePath', ())] - >>> rec = 
RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.closePath() - >>> pprint(rec.value) - [('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((100, 0)) - >>> pen.lineTo((100, 100)) - >>> pen.endPath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('lineTo', ((100, 0),)), - ('lineTo', ((100, 100),)), - ('endPath', ())] - """ - - def filterContour(self, contour): - if ( - not contour - or contour[0][0] != "moveTo" - or contour[-1][0] != "closePath" - or len(contour) < 3 - ): - return - movePt = contour[0][1][0] - lastSeg = contour[-2][1] - if lastSeg and movePt != lastSeg[-1]: - contour[-1:] = [("lineTo", (movePt,)), ("closePath", ())] diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeglsenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeglsenc.c deleted file mode 100644 index 53394102df3e6fdbe216a8165748fe89cd0533bd..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeglsenc.c +++ /dev/null @@ -1,492 +0,0 @@ -/* - * JPEG-LS encoder - * Copyright (c) 2003 Michael Niedermayer - * Copyright (c) 2006 Konstantin Shishkov - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * JPEG-LS encoder. 
- */ - -#define UNCHECKED_BITSTREAM_READER 1 -#include "libavutil/opt.h" -#include "avcodec.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "encode.h" -#include "get_bits.h" -#include "put_bits.h" -#include "put_golomb.h" -#include "mathops.h" -#include "mjpeg.h" -#include "jpegls.h" - -typedef struct JPEGLSContext { - AVClass *class; - - int pred; - int comps; - - size_t size; - uint8_t *buf; -} JPEGLSContext; - -static inline void put_marker_byteu(PutByteContext *pb, enum JpegMarker code) -{ - bytestream2_put_byteu(pb, 0xff); - bytestream2_put_byteu(pb, code); -} - -/** - * Encode error from regular symbol - */ -static inline void ls_encode_regular(JLSState *state, PutBitContext *pb, int Q, - int err) -{ - int k; - int val; - int map; - - for (k = 0; (state->N[Q] << k) < state->A[Q]; k++) - ; - - map = !state->near && !k && (2 * state->B[Q] <= -state->N[Q]); - - if (err < 0) - err += state->range; - if (err >= (state->range + 1 >> 1)) { - err -= state->range; - val = 2 * FFABS(err) - 1 - map; - } else - val = 2 * err + map; - - set_ur_golomb_jpegls(pb, val, k, state->limit, state->qbpp); - - ff_jpegls_update_state_regular(state, Q, err); -} - -/** - * Encode error from run termination - */ -static inline void ls_encode_runterm(JLSState *state, PutBitContext *pb, - int RItype, int err, int limit_add) -{ - int k; - int val, map; - int Q = 365 + RItype; - int temp; - - temp = state->A[Q]; - if (RItype) - temp += state->N[Q] >> 1; - for (k = 0; (state->N[Q] << k) < temp; k++) - ; - map = 0; - if (!k && err && (2 * state->B[Q] < state->N[Q])) - map = 1; - - if (err < 0) - val = -(2 * err) - 1 - RItype + map; - else - val = 2 * err - RItype - map; - set_ur_golomb_jpegls(pb, val, k, state->limit - limit_add - 1, state->qbpp); - - if (err < 0) - state->B[Q]++; - state->A[Q] += (val + 1 - RItype) >> 1; - - ff_jpegls_downscale_state(state, Q); -} - -/** - * Encode run value as specified by JPEG-LS standard - */ -static inline void ls_encode_run(JLSState *state, PutBitContext *pb, int run, - int comp, int trail) -{ - while (run >= (1 << ff_log2_run[state->run_index[comp]])) { - put_bits(pb, 1, 1); - run -= 1 << ff_log2_run[state->run_index[comp]]; - if (state->run_index[comp] < 31) - state->run_index[comp]++; - } - /* if hit EOL, encode another full run, else encode aborted run */ - if (!trail && run) { - put_bits(pb, 1, 1); - } else if (trail) { - put_bits(pb, 1, 0); - if (ff_log2_run[state->run_index[comp]]) - put_bits(pb, ff_log2_run[state->run_index[comp]], run); - } -} - -/** - * Encode one line of image - */ -static inline void ls_encode_line(JLSState *state, PutBitContext *pb, - void *tmp, const void *in, int last2, int w, - int stride, int comp, int bits) -{ - int x = 0; - int Ra = R(tmp, 0), Rb, Rc = last2, Rd; - int D0, D1, D2; - - while (x < w) { - int err, pred, sign; - - /* compute gradients */ - Rb = R(tmp, x); - Rd = (x >= w - stride) ? R(tmp, x) : R(tmp, x + stride); - D0 = Rd - Rb; - D1 = Rb - Rc; - D2 = Rc - Ra; - - /* run mode */ - if ((FFABS(D0) <= state->near) && - (FFABS(D1) <= state->near) && - (FFABS(D2) <= state->near)) { - int RUNval, RItype, run; - - run = 0; - RUNval = Ra; - while (x < w && (FFABS(R(in, x) - RUNval) <= state->near)) { - run++; - W(tmp, x, Ra); - x += stride; - } - ls_encode_run(state, pb, run, comp, x < w); - if (x >= w) - return; - Rb = R(tmp, x); - RItype = FFABS(Ra - Rb) <= state->near; - pred = RItype ? 
Ra : Rb; - err = R(in, x) - pred; - - if (!RItype && Ra > Rb) - err = -err; - - if (state->near) { - if (err > 0) - err = (state->near + err) / state->twonear; - else - err = -(state->near - err) / state->twonear; - - if (RItype || (Rb >= Ra)) - Ra = av_clip(pred + err * state->twonear, 0, state->maxval); - else - Ra = av_clip(pred - err * state->twonear, 0, state->maxval); - } else - Ra = R(in, x); - W(tmp, x, Ra); - - if (err < 0) - err += state->range; - if (err >= state->range + 1 >> 1) - err -= state->range; - - ls_encode_runterm(state, pb, RItype, err, - ff_log2_run[state->run_index[comp]]); - - if (state->run_index[comp] > 0) - state->run_index[comp]--; - } else { /* regular mode */ - int context; - - context = ff_jpegls_quantize(state, D0) * 81 + - ff_jpegls_quantize(state, D1) * 9 + - ff_jpegls_quantize(state, D2); - pred = mid_pred(Ra, Ra + Rb - Rc, Rb); - - if (context < 0) { - context = -context; - sign = 1; - pred = av_clip(pred - state->C[context], 0, state->maxval); - err = pred - R(in, x); - } else { - sign = 0; - pred = av_clip(pred + state->C[context], 0, state->maxval); - err = R(in, x) - pred; - } - - if (state->near) { - if (err > 0) - err = (state->near + err) / state->twonear; - else - err = -(state->near - err) / state->twonear; - if (!sign) - Ra = av_clip(pred + err * state->twonear, 0, state->maxval); - else - Ra = av_clip(pred - err * state->twonear, 0, state->maxval); - } else - Ra = R(in, x); - W(tmp, x, Ra); - - ls_encode_regular(state, pb, context, err); - } - Rc = Rb; - x += stride; - } -} - -static void ls_store_lse(JLSState *state, PutByteContext *pb) -{ - /* Test if we have default params and don't need to store LSE */ - JLSState state2 = { 0 }; - state2.bpp = state->bpp; - state2.near = state->near; - ff_jpegls_reset_coding_parameters(&state2, 1); - if (state->T1 == state2.T1 && - state->T2 == state2.T2 && - state->T3 == state2.T3 && - state->reset == state2.reset) - return; - /* store LSE type 1 */ - put_marker_byteu(pb, LSE); - bytestream2_put_be16u(pb, 13); - bytestream2_put_byteu(pb, 1); - bytestream2_put_be16u(pb, state->maxval); - bytestream2_put_be16u(pb, state->T1); - bytestream2_put_be16u(pb, state->T2); - bytestream2_put_be16u(pb, state->T3); - bytestream2_put_be16u(pb, state->reset); -} - -static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, - const AVFrame *pict, int *got_packet) -{ - JPEGLSContext *ctx = avctx->priv_data; - const AVFrame *const p = pict; - PutByteContext pb; - PutBitContext pb2; - GetBitContext gb; - const uint8_t *in; - uint8_t *last = NULL; - JLSState state = { 0 }; - size_t size; - int i, ret, size_in_bits; - int comps; - - last = av_mallocz(FFABS(p->linesize[0])); - if (!last) - return AVERROR(ENOMEM); - - init_put_bits(&pb2, ctx->buf, ctx->size); - - comps = ctx->comps; - /* initialize JPEG-LS state from JPEG parameters */ - state.near = ctx->pred; - state.bpp = (avctx->pix_fmt == AV_PIX_FMT_GRAY16) ? 
16 : 8; - ff_jpegls_reset_coding_parameters(&state, 0); - ff_jpegls_init_state(&state); - - in = p->data[0]; - if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) { - int t = 0; - - for (i = 0; i < avctx->height; i++) { - int last0 = last[0]; - ls_encode_line(&state, &pb2, last, in, t, avctx->width, 1, 0, 8); - t = last0; - in += p->linesize[0]; - } - } else if (avctx->pix_fmt == AV_PIX_FMT_GRAY16) { - int t = 0; - - for (i = 0; i < avctx->height; i++) { - int last0 = *((uint16_t *)last); - ls_encode_line(&state, &pb2, last, in, t, avctx->width, 1, 0, 16); - t = last0; - in += p->linesize[0]; - } - } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) { - int j, width; - int Rc[3] = { 0, 0, 0 }; - - width = avctx->width * 3; - for (i = 0; i < avctx->height; i++) { - for (j = 0; j < 3; j++) { - int last0 = last[j]; - ls_encode_line(&state, &pb2, last + j, in + j, Rc[j], - width, 3, j, 8); - Rc[j] = last0; - } - in += p->linesize[0]; - } - } else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) { - int j, width; - int Rc[3] = { 0, 0, 0 }; - - width = avctx->width * 3; - for (i = 0; i < avctx->height; i++) { - for (j = 2; j >= 0; j--) { - int last0 = last[j]; - ls_encode_line(&state, &pb2, last + j, in + j, Rc[j], - width, 3, j, 8); - Rc[j] = last0; - } - in += p->linesize[0]; - } - } - av_free(last); - /* Now the actual image data has been written, which enables us to estimate - * the needed packet size: For every 15 input bits, an escape bit might be - * added below; and if put_bits_count % 15 is >= 8, then another bit might - * be added. - * Furthermore the specification says that after doing 0xff escaping unused - * bits in the last byte must be set to 0, so just append 7 "optional" zero - * bits to avoid special-casing. This also simplifies the size calculation: - * Properly rounding up is now automatically baked-in. */ - put_bits(&pb2, 7, 0); - /* Make sure that the bit count + padding is representable in an int; - necessary for put_bits_count() as well as for using a GetBitContext. */ - if (put_bytes_count(&pb2, 0) > INT_MAX / 8 - AV_INPUT_BUFFER_PADDING_SIZE) - return AVERROR(ERANGE); - size_in_bits = put_bits_count(&pb2); - flush_put_bits(&pb2); - size = size_in_bits * 2U / 15; - size += 2 + 2 + 2 + 1 + 2 + 2 + 1 + comps * (1 + 1 + 1) + 2 + 2 + 1 - + comps * (1 + 1) + 1 + 1 + 1; /* Header */ - size += 2 + 2 + 1 + 2 + 2 + 2 + 2 + 2; /* LSE */ - size += 2; /* EOI */ - if ((ret = ff_get_encode_buffer(avctx, pkt, size, 0)) < 0) - return ret; - - bytestream2_init_writer(&pb, pkt->data, pkt->size); - - /* write our own JPEG header, can't use mjpeg_picture_header */ - put_marker_byteu(&pb, SOI); - put_marker_byteu(&pb, SOF48); - bytestream2_put_be16u(&pb, 8 + comps * 3); // header size depends on components - bytestream2_put_byteu(&pb, (avctx->pix_fmt == AV_PIX_FMT_GRAY16) ? 16 : 8); // bpp - bytestream2_put_be16u(&pb, avctx->height); - bytestream2_put_be16u(&pb, avctx->width); - bytestream2_put_byteu(&pb, comps); // components - for (i = 1; i <= comps; i++) { - bytestream2_put_byteu(&pb, i); // component ID - bytestream2_put_byteu(&pb, 0x11); // subsampling: none - bytestream2_put_byteu(&pb, 0); // Tiq, used by JPEG-LS ext - } - - put_marker_byteu(&pb, SOS); - bytestream2_put_be16u(&pb, 6 + comps * 2); - bytestream2_put_byteu(&pb, comps); - for (i = 1; i <= comps; i++) { - bytestream2_put_byteu(&pb, i); // component ID - bytestream2_put_byteu(&pb, 0); // mapping index: none - } - bytestream2_put_byteu(&pb, ctx->pred); - bytestream2_put_byteu(&pb, (comps > 1) ? 
1 : 0); // interleaving: 0 - plane, 1 - line - bytestream2_put_byteu(&pb, 0); // point transform: none - - ls_store_lse(&state, &pb); - - /* do escape coding */ - init_get_bits(&gb, pb2.buf, size_in_bits); - size_in_bits -= 7; - while (get_bits_count(&gb) < size_in_bits) { - int v; - v = get_bits(&gb, 8); - bytestream2_put_byteu(&pb, v); - if (v == 0xFF) { - v = get_bits(&gb, 7); - bytestream2_put_byteu(&pb, v); - } - } - - /* End of image */ - put_marker_byteu(&pb, EOI); - - av_shrink_packet(pkt, bytestream2_tell_p(&pb)); - *got_packet = 1; - return 0; -} - -static av_cold int encode_jpegls_init(AVCodecContext *avctx) -{ - JPEGLSContext *ctx = avctx->priv_data; - size_t size; - - if ((avctx->width | avctx->height) > UINT16_MAX) { - av_log(avctx, AV_LOG_ERROR, "Dimensions exceeding 65535x65535\n"); - return AVERROR(EINVAL); - } - if (avctx->pix_fmt == AV_PIX_FMT_GRAY8 || - avctx->pix_fmt == AV_PIX_FMT_GRAY16) - ctx->comps = 1; - else - ctx->comps = 3; - size = AV_INPUT_BUFFER_MIN_SIZE; - /* INT_MAX due to PutBit-API. */ - if (avctx->width * (unsigned)avctx->height > (INT_MAX - size) / 4 / ctx->comps) - return AVERROR(ERANGE); - size += 4 * ctx->comps * avctx->width * avctx->height; - ctx->size = size; - ctx->buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!ctx->buf) - return AVERROR(ENOMEM); - - return 0; -} - -static av_cold int encode_jpegls_close(AVCodecContext *avctx) -{ - JPEGLSContext *ctx = avctx->priv_data; - - av_freep(&ctx->buf); - return 0; -} - -#define OFFSET(x) offsetof(JPEGLSContext, x) -#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM -static const AVOption options[] = { -{ "pred", "Prediction method", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, VE, "pred" }, - { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "pred" }, - { "plane", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "pred" }, - { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "pred" }, - - { NULL}, -}; - -static const AVClass jpegls_class = { - .class_name = "jpegls", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_jpegls_encoder = { - .p.name = "jpegls", - CODEC_LONG_NAME("JPEG-LS"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_JPEGLS, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | - AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, - .priv_data_size = sizeof(JPEGLSContext), - .p.priv_class = &jpegls_class, - .init = encode_jpegls_init, - FF_CODEC_ENCODE_CB(encode_picture_ls), - .close = encode_jpegls_close, - .p.pix_fmts = (const enum AVPixelFormat[]) { - AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24, - AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16, - AV_PIX_FMT_NONE - }, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libcodec2.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libcodec2.c deleted file mode 100644 index 83f68e85c79ede8f0578fb9158897d1b64489065..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/libcodec2.c +++ /dev/null @@ -1,212 +0,0 @@ -/* - * codec2 encoder/decoder using libcodec2 - * Copyright (c) 2017 Tomas Härdin - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include -#include "libavutil/channel_layout.h" -#include "avcodec.h" -#include "libavutil/opt.h" -#include "codec_internal.h" -#include "decode.h" -#include "encode.h" -#include "codec2utils.h" - -typedef struct { - const AVClass *class; - struct CODEC2 *codec; - int mode; -} LibCodec2Context; - -static const AVOption options[] = { - //not AV_OPT_FLAG_DECODING_PARAM since mode should come from the demuxer - //1300 (aka FreeDV 1600) is the most common mode on-the-air, default to it here as well - CODEC2_AVOPTIONS("codec2 mode", LibCodec2Context, 0, 4 /*CODEC2_MODE_1300*/, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_ENCODING_PARAM), - { NULL }, -}; - -static const AVClass libcodec2_enc_class = { - .class_name = "libcodec2 encoder", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -static av_cold int libcodec2_init_common(AVCodecContext *avctx, int mode) -{ - LibCodec2Context *c2 = avctx->priv_data; - //Grab mode name from options, unless it's some weird number. - const char *modename = mode >= 0 && mode <= CODEC2_MODE_MAX ? options[mode+1].name : "?"; - - c2->codec = codec2_create(mode); - if (!c2->codec) { - //Out of memory or unsupported mode. The latter seems most likely, - //but we can't tell for sure with the current API. 
- goto libcodec2_init_common_error; - } - - avctx->frame_size = codec2_samples_per_frame(c2->codec); - avctx->block_align = (codec2_bits_per_frame(c2->codec) + 7) / 8; - - if (avctx->frame_size <= 0 || avctx->block_align <= 0) { - //codec2_create() may succeed for some modes but still fail at codec2_samples_per_frame() - //example is -mode 700C on libcodec2 0.4 - codec2_destroy(c2->codec); - c2->codec = NULL; - goto libcodec2_init_common_error; - } - - codec2_set_natural_or_gray(c2->codec, 1); - - return 0; - -libcodec2_init_common_error: - av_log(avctx, AV_LOG_ERROR, - "Mode %i (%s) not supported with the linked version of libcodec2\n", - mode, modename); - return AVERROR(EINVAL); -} - -static av_cold int libcodec2_init_decoder(AVCodecContext *avctx) -{ - avctx->sample_rate = 8000; - avctx->sample_fmt = AV_SAMPLE_FMT_S16; - av_channel_layout_uninit(&avctx->ch_layout); - avctx->ch_layout = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO; - - if (avctx->extradata_size != CODEC2_EXTRADATA_SIZE) { - av_log(avctx, AV_LOG_ERROR, "must have exactly %i bytes of extradata (got %i)\n", - CODEC2_EXTRADATA_SIZE, avctx->extradata_size); - return AVERROR_INVALIDDATA; - } - - return libcodec2_init_common(avctx, codec2_mode_from_extradata(avctx->extradata)); -} - -static av_cold int libcodec2_init_encoder(AVCodecContext *avctx) -{ - LibCodec2Context *c2 = avctx->priv_data; - - //will need to be smarter once we get wideband support - if (avctx->sample_rate != 8000 || - avctx->sample_fmt != AV_SAMPLE_FMT_S16) { - av_log(avctx, AV_LOG_ERROR, "only 8 kHz 16-bit mono allowed\n"); - return AVERROR(EINVAL); - } - - avctx->extradata = av_mallocz(CODEC2_EXTRADATA_SIZE + AV_INPUT_BUFFER_PADDING_SIZE); - if (!avctx->extradata) { - return AVERROR(ENOMEM); - } - - avctx->extradata_size = CODEC2_EXTRADATA_SIZE; - codec2_make_extradata(avctx->extradata, c2->mode); - - return libcodec2_init_common(avctx, c2->mode); -} - -static av_cold int libcodec2_close(AVCodecContext *avctx) -{ - LibCodec2Context *c2 = avctx->priv_data; - - codec2_destroy(c2->codec); - return 0; -} - -static int libcodec2_decode(AVCodecContext *avctx, AVFrame *frame, - int *got_frame_ptr, AVPacket *pkt) -{ - LibCodec2Context *c2 = avctx->priv_data; - int ret, nframes, i; - const uint8_t *input; - int16_t *output; - - nframes = pkt->size / avctx->block_align; - frame->nb_samples = avctx->frame_size * nframes; - - ret = ff_get_buffer(avctx, frame, 0); - if (ret < 0) { - return ret; - } - - input = pkt->data; - output = (int16_t *)frame->data[0]; - - for (i = 0; i < nframes; i++) { - codec2_decode(c2->codec, output, input); - input += avctx->block_align; - output += avctx->frame_size; - } - - *got_frame_ptr = nframes > 0; - return nframes * avctx->block_align; -} - -static int libcodec2_encode(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr) -{ - LibCodec2Context *c2 = avctx->priv_data; - int16_t *samples = (int16_t *)frame->data[0]; - - int ret = ff_get_encode_buffer(avctx, avpkt, avctx->block_align, 0); - if (ret < 0) { - return ret; - } - - codec2_encode(c2->codec, avpkt->data, samples); - *got_packet_ptr = 1; - - return 0; -} - -const FFCodec ff_libcodec2_decoder = { - .p.name = "libcodec2", - CODEC_LONG_NAME("codec2 decoder using libcodec2"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_CODEC2, - .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF, - .p.supported_samplerates = (const int[]){ 8000, 0 }, - .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, - .p.ch_layouts = (const 
AVChannelLayout[]) { AV_CHANNEL_LAYOUT_MONO, { 0 } }, - .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, - .priv_data_size = sizeof(LibCodec2Context), - .init = libcodec2_init_decoder, - .close = libcodec2_close, - FF_CODEC_DECODE_CB(libcodec2_decode), - CODEC_OLD_CHANNEL_LAYOUTS(AV_CH_LAYOUT_MONO) -}; - -const FFCodec ff_libcodec2_encoder = { - .p.name = "libcodec2", - CODEC_LONG_NAME("codec2 encoder using libcodec2"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_CODEC2, - .p.capabilities = AV_CODEC_CAP_DR1 | - AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, - .p.supported_samplerates = (const int[]){ 8000, 0 }, - .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, - .p.ch_layouts = (const AVChannelLayout[]) { AV_CHANNEL_LAYOUT_MONO, { 0 } }, - .p.priv_class = &libcodec2_enc_class, - .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, - .priv_data_size = sizeof(LibCodec2Context), - .init = libcodec2_init_encoder, - .close = libcodec2_close, - FF_CODEC_ENCODE_CB(libcodec2_encode), - CODEC_OLD_CHANNEL_LAYOUTS(AV_CH_LAYOUT_MONO) -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/idctdsp_loongarch.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/idctdsp_loongarch.h deleted file mode 100644 index cae8e7af5847f91e6b7cc1051eb9e6e2f6b53f98..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/idctdsp_loongarch.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2021 Loongson Technology Corporation Limited - * Contributed by Hao Chen - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_LOONGARCH_IDCTDSP_LOONGARCH_H -#define AVCODEC_LOONGARCH_IDCTDSP_LOONGARCH_H - -#include -#include "libavcodec/mpegvideo.h" - -void ff_simple_idct_lasx(int16_t *block); -void ff_simple_idct_put_lasx(uint8_t *dest, ptrdiff_t stride_dst, int16_t *block); -void ff_simple_idct_add_lasx(uint8_t *dest, ptrdiff_t stride_dst, int16_t *block); -void ff_put_pixels_clamped_lasx(const int16_t *block, - uint8_t *av_restrict pixels, - ptrdiff_t line_size); -void ff_put_signed_pixels_clamped_lasx(const int16_t *block, - uint8_t *av_restrict pixels, - ptrdiff_t line_size); -void ff_add_pixels_clamped_lasx(const int16_t *block, - uint8_t *av_restrict pixels, - ptrdiff_t line_size); - -#endif /* AVCODEC_LOONGARCH_IDCTDSP_LOONGARCH_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/vp9_intra_lsx.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/vp9_intra_lsx.c deleted file mode 100644 index d3f32646f358393e7b9544dc8a2eae50cad3d664..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/vp9_intra_lsx.c +++ /dev/null @@ -1,653 +0,0 @@ -/* - * Copyright (c) 2021 Loongson Technology Corporation Limited - * Contributed by Hao Chen - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavcodec/vp9dsp.h" -#include "libavutil/loongarch/loongson_intrinsics.h" -#include "vp9dsp_loongarch.h" - -#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4, \ - _dst5, _dst6, _dst7, _dst, _stride, \ - _stride2, _stride3, _stride4) \ -{ \ - __lsx_vst(_dst0, _dst, 0); \ - __lsx_vstx(_dst1, _dst, _stride); \ - __lsx_vstx(_dst2, _dst, _stride2); \ - __lsx_vstx(_dst3, _dst, _stride3); \ - _dst += _stride4; \ - __lsx_vst(_dst4, _dst, 0); \ - __lsx_vstx(_dst5, _dst, _stride); \ - __lsx_vstx(_dst6, _dst, _stride2); \ - __lsx_vstx(_dst7, _dst, _stride3); \ -} - -#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4, \ - _dst5, _dst6, _dst7, _dst, _stride) \ -{ \ - __lsx_vst(_dst0, _dst, 0); \ - __lsx_vst(_dst0, _dst, 16); \ - _dst += _stride; \ - __lsx_vst(_dst1, _dst, 0); \ - __lsx_vst(_dst1, _dst, 16); \ - _dst += _stride; \ - __lsx_vst(_dst2, _dst, 0); \ - __lsx_vst(_dst2, _dst, 16); \ - _dst += _stride; \ - __lsx_vst(_dst3, _dst, 0); \ - __lsx_vst(_dst3, _dst, 16); \ - _dst += _stride; \ - __lsx_vst(_dst4, _dst, 0); \ - __lsx_vst(_dst4, _dst, 16); \ - _dst += _stride; \ - __lsx_vst(_dst5, _dst, 0); \ - __lsx_vst(_dst5, _dst, 16); \ - _dst += _stride; \ - __lsx_vst(_dst6, _dst, 0); \ - __lsx_vst(_dst6, _dst, 16); \ - _dst += _stride; \ - __lsx_vst(_dst7, _dst, 0); \ - __lsx_vst(_dst7, _dst, 16); \ - _dst += _stride; \ -} - -void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left, - const uint8_t *src) -{ - __m128i src0; - ptrdiff_t stride2 = dst_stride << 1; - ptrdiff_t stride3 = stride2 + dst_stride; - ptrdiff_t stride4 = stride2 << 1; - src0 = __lsx_vld(src, 0); - LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst, - dst_stride, stride2, stride3, stride4); - dst += stride4; - LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst, - dst_stride, stride2, stride3, stride4); -} - -void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left, - const uint8_t *src) -{ - uint32_t row; - __m128i src0, src1; - - DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1); - for (row = 32; row--;) { - __lsx_vst(src0, dst, 0); - __lsx_vst(src1, dst, 16); - dst += dst_stride; - } -} - -void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, - const uint8_t *top) -{ - __m128i src0, src1, src2, src3, src4, src5, src6, src7; - __m128i src8, src9, src10, src11, src12, src13, src14, src15; - ptrdiff_t stride2 = dst_stride << 1; - ptrdiff_t stride3 = stride2 + dst_stride; - ptrdiff_t stride4 = stride2 << 1; - - src15 = __lsx_vldrepl_b(src, 0); - src14 = __lsx_vldrepl_b(src, 1); - src13 = __lsx_vldrepl_b(src, 2); - src12 = __lsx_vldrepl_b(src, 3); - src11 = __lsx_vldrepl_b(src, 4); - src10 = __lsx_vldrepl_b(src, 5); - src9 = __lsx_vldrepl_b(src, 6); - src8 = __lsx_vldrepl_b(src, 7); - src7 = __lsx_vldrepl_b(src, 8); - src6 = __lsx_vldrepl_b(src, 9); - src5 = __lsx_vldrepl_b(src, 10); - src4 = __lsx_vldrepl_b(src, 11); - src3 = __lsx_vldrepl_b(src, 12); - src2 = __lsx_vldrepl_b(src, 13); - src1 = __lsx_vldrepl_b(src, 14); - src0 = __lsx_vldrepl_b(src, 15); - LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst, - dst_stride, stride2, stride3, stride4); - dst += stride4; - LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst, - dst_stride, stride2, stride3, stride4); -} - -void 
ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, - const uint8_t *top) -{ - __m128i src0, src1, src2, src3, src4, src5, src6, src7; - __m128i src8, src9, src10, src11, src12, src13, src14, src15; - __m128i src16, src17, src18, src19, src20, src21, src22, src23; - __m128i src24, src25, src26, src27, src28, src29, src30, src31; - - src31 = __lsx_vldrepl_b(src, 0); - src30 = __lsx_vldrepl_b(src, 1); - src29 = __lsx_vldrepl_b(src, 2); - src28 = __lsx_vldrepl_b(src, 3); - src27 = __lsx_vldrepl_b(src, 4); - src26 = __lsx_vldrepl_b(src, 5); - src25 = __lsx_vldrepl_b(src, 6); - src24 = __lsx_vldrepl_b(src, 7); - src23 = __lsx_vldrepl_b(src, 8); - src22 = __lsx_vldrepl_b(src, 9); - src21 = __lsx_vldrepl_b(src, 10); - src20 = __lsx_vldrepl_b(src, 11); - src19 = __lsx_vldrepl_b(src, 12); - src18 = __lsx_vldrepl_b(src, 13); - src17 = __lsx_vldrepl_b(src, 14); - src16 = __lsx_vldrepl_b(src, 15); - src15 = __lsx_vldrepl_b(src, 16); - src14 = __lsx_vldrepl_b(src, 17); - src13 = __lsx_vldrepl_b(src, 18); - src12 = __lsx_vldrepl_b(src, 19); - src11 = __lsx_vldrepl_b(src, 20); - src10 = __lsx_vldrepl_b(src, 21); - src9 = __lsx_vldrepl_b(src, 22); - src8 = __lsx_vldrepl_b(src, 23); - src7 = __lsx_vldrepl_b(src, 24); - src6 = __lsx_vldrepl_b(src, 25); - src5 = __lsx_vldrepl_b(src, 26); - src4 = __lsx_vldrepl_b(src, 27); - src3 = __lsx_vldrepl_b(src, 28); - src2 = __lsx_vldrepl_b(src, 29); - src1 = __lsx_vldrepl_b(src, 30); - src0 = __lsx_vldrepl_b(src, 31); - LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7, - dst, dst_stride); - LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15, - dst, dst_stride); - LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23, - dst, dst_stride); - LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31, - dst, dst_stride); -} - -void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left, - const uint8_t *src_top) -{ - __m128i tmp0, tmp1, dst0; - - tmp0 = __lsx_vldrepl_w(src_top, 0); - tmp1 = __lsx_vldrepl_w(src_left, 0); - dst0 = __lsx_vilvl_w(tmp1, tmp0); - dst0 = __lsx_vhaddw_hu_bu(dst0, dst0); - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); - dst0 = __lsx_vhaddw_du_wu(dst0, dst0); - dst0 = __lsx_vsrari_w(dst0, 3); - dst0 = __lsx_vshuf4i_b(dst0, 0); - __lsx_vstelm_w(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_w(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_w(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_w(dst0, dst, 0, 0); -} - -#define INTRA_DC_TL_4X4(dir) \ -void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, \ - const uint8_t *left, \ - const uint8_t *top) \ -{ \ - __m128i tmp0, dst0; \ - \ - tmp0 = __lsx_vldrepl_w(dir, 0); \ - dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \ - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \ - dst0 = __lsx_vsrari_w(dst0, 2); \ - dst0 = __lsx_vshuf4i_b(dst0, 0); \ - __lsx_vstelm_w(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_w(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_w(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_w(dst0, dst, 0, 0); \ -} -INTRA_DC_TL_4X4(top); -INTRA_DC_TL_4X4(left); - -void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left, - const uint8_t *src_top) -{ - __m128i tmp0, tmp1, dst0; - - tmp0 = __lsx_vldrepl_d(src_top, 0); - tmp1 = __lsx_vldrepl_d(src_left, 0); - dst0 = __lsx_vilvl_d(tmp1, tmp0); - dst0 = __lsx_vhaddw_hu_bu(dst0, dst0); - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); - dst0 = __lsx_vhaddw_du_wu(dst0, dst0); - dst0 = __lsx_vhaddw_qu_du(dst0, dst0); 
- dst0 = __lsx_vsrari_w(dst0, 4); - dst0 = __lsx_vreplvei_b(dst0, 0); - __lsx_vstelm_d(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(dst0, dst, 0, 0); -} - -#define INTRA_DC_TL_8X8(dir) \ -void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, \ - const uint8_t *left, \ - const uint8_t *top) \ -{ \ - __m128i tmp0, dst0; \ - \ - tmp0 = __lsx_vldrepl_d(dir, 0); \ - dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \ - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \ - dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \ - dst0 = __lsx_vsrari_w(dst0, 3); \ - dst0 = __lsx_vreplvei_b(dst0, 0); \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ - dst += dst_stride; \ - __lsx_vstelm_d(dst0, dst, 0, 0); \ -} - -INTRA_DC_TL_8X8(top); -INTRA_DC_TL_8X8(left); - -void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src_left, const uint8_t *src_top) -{ - __m128i tmp0, tmp1, dst0; - ptrdiff_t stride2 = dst_stride << 1; - ptrdiff_t stride3 = stride2 + dst_stride; - ptrdiff_t stride4 = stride2 << 1; - - tmp0 = __lsx_vld(src_top, 0); - tmp1 = __lsx_vld(src_left, 0); - DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1); - dst0 = __lsx_vadd_h(tmp0, tmp1); - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); - dst0 = __lsx_vhaddw_du_wu(dst0, dst0); - dst0 = __lsx_vhaddw_qu_du(dst0, dst0); - dst0 = __lsx_vsrari_w(dst0, 5); - dst0 = __lsx_vreplvei_b(dst0, 0); - LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, - dst_stride, stride2, stride3, stride4); - dst += stride4; - LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, - dst_stride, stride2, stride3, stride4); -} - -#define INTRA_DC_TL_16X16(dir) \ -void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, \ - const uint8_t *left, \ - const uint8_t *top) \ -{ \ - __m128i tmp0, dst0; \ - ptrdiff_t stride2 = dst_stride << 1; \ - ptrdiff_t stride3 = stride2 + dst_stride; \ - ptrdiff_t stride4 = stride2 << 1; \ - \ - tmp0 = __lsx_vld(dir, 0); \ - dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \ - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \ - dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \ - dst0 = __lsx_vhaddw_qu_du(dst0, dst0); \ - dst0 = __lsx_vsrari_w(dst0, 4); \ - dst0 = __lsx_vreplvei_b(dst0, 0); \ - LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, \ - dst_stride, stride2, stride3, stride4); \ - dst += stride4; \ - LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, \ - dst_stride, stride2, stride3, stride4); \ -} - -INTRA_DC_TL_16X16(top); -INTRA_DC_TL_16X16(left); - -void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src_left, const uint8_t *src_top) -{ - __m128i tmp0, tmp1, tmp2, tmp3, dst0; - - DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1); - DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3); - DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2, - tmp3, tmp3, tmp0, tmp1, tmp2, 
tmp3); - DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1); - dst0 = __lsx_vadd_h(tmp0, tmp1); - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); - dst0 = __lsx_vhaddw_du_wu(dst0, dst0); - dst0 = __lsx_vhaddw_qu_du(dst0, dst0); - dst0 = __lsx_vsrari_w(dst0, 6); - dst0 = __lsx_vreplvei_b(dst0, 0); - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, - dst, dst_stride); - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, - dst, dst_stride); - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, - dst, dst_stride); - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, - dst, dst_stride); -} - -#define INTRA_DC_TL_32X32(dir) \ -void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, \ - const uint8_t *left, \ - const uint8_t *top) \ -{ \ - __m128i tmp0, tmp1, dst0; \ - \ - DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1); \ - DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1); \ - dst0 = __lsx_vadd_h(tmp0, tmp1); \ - dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \ - dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \ - dst0 = __lsx_vhaddw_qu_du(dst0, dst0); \ - dst0 = __lsx_vsrari_w(dst0, 5); \ - dst0 = __lsx_vreplvei_b(dst0, 0); \ - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \ - dst, dst_stride); \ - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \ - dst, dst_stride); \ - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \ - dst, dst_stride); \ - LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \ - dst, dst_stride); \ -} - -INTRA_DC_TL_32X32(top); -INTRA_DC_TL_32X32(left); - -#define INTRA_PREDICT_VALDC_16X16_LSX(val) \ -void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, \ - const uint8_t *left, const uint8_t *top) \ -{ \ - __m128i out = __lsx_vldi(val); \ - ptrdiff_t stride2 = dst_stride << 1; \ - ptrdiff_t stride3 = stride2 + dst_stride; \ - ptrdiff_t stride4 = stride2 << 1; \ - \ - LSX_ST_8(out, out, out, out, out, out, out, out, dst, \ - dst_stride, stride2, stride3, stride4); \ - dst += stride4; \ - LSX_ST_8(out, out, out, out, out, out, out, out, dst, \ - dst_stride, stride2, stride3, stride4); \ -} - -INTRA_PREDICT_VALDC_16X16_LSX(127); -INTRA_PREDICT_VALDC_16X16_LSX(128); -INTRA_PREDICT_VALDC_16X16_LSX(129); - -#define INTRA_PREDICT_VALDC_32X32_LSX(val) \ -void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, \ - const uint8_t *left, const uint8_t *top) \ -{ \ - __m128i out = __lsx_vldi(val); \ - \ - LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\ - LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\ - LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\ - LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\ -} - -INTRA_PREDICT_VALDC_32X32_LSX(127); -INTRA_PREDICT_VALDC_32X32_LSX(128); -INTRA_PREDICT_VALDC_32X32_LSX(129); - -void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src_left, const uint8_t *src_top_ptr) -{ - uint8_t top_left = src_top_ptr[-1]; - __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1; - __m128i src0, src1, src2, src3; - __m128i dst0, dst1, dst2, dst3; - - reg0 = __lsx_vreplgr2vr_h(top_left); - reg1 = __lsx_vld(src_top_ptr, 0); - DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left, - 3, tmp3, tmp2, tmp1, tmp0); - DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3, - src3, dst0, dst1, dst2, dst3); - 
DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3, reg0, - dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7, - dst0, dst1, dst2, dst3); - DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1); - __lsx_vstelm_w(dst0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_w(dst0, dst, 0, 2); - dst += dst_stride; - __lsx_vstelm_w(dst1, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_w(dst1, dst, 0, 2); -} - -void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src_left, const uint8_t *src_top_ptr) -{ - uint8_t top_left = src_top_ptr[-1]; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - __m128i src0, src1, src2, src3, src4, src5, src6, src7; - __m128i reg0, reg1; - - reg0 = __lsx_vreplgr2vr_h(top_left); - reg1 = __lsx_vld(src_top_ptr, 0); - DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left, - 3, tmp7, tmp6, tmp5, tmp4); - DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left, - 7, tmp3, tmp2, tmp1, tmp0); - DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7, reg1, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3, - src3, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6, src7, - src7, src4, src5, src6, src7); - DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4, src7, src6, - src0, src1, src2, src3); - __lsx_vstelm_d(src0, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(src0, dst, 0, 1); - dst += dst_stride; - __lsx_vstelm_d(src1, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(src1, dst, 0, 1); - dst += dst_stride; - __lsx_vstelm_d(src2, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(src2, dst, 0, 1); - dst += dst_stride; - __lsx_vstelm_d(src3, dst, 0, 0); - dst += dst_stride; - __lsx_vstelm_d(src3, dst, 0, 1); -} - -void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src_left, const uint8_t *src_top_ptr) -{ - uint8_t top_left = src_top_ptr[-1]; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15; - __m128i src0, src1, src2, src3, src4, src5, src6, src7; - __m128i reg0, reg1; - ptrdiff_t stride2 = dst_stride << 1; - ptrdiff_t stride3 = stride2 + dst_stride; - ptrdiff_t stride4 = stride2 << 1; - - reg0 = __lsx_vreplgr2vr_h(top_left); - reg1 = __lsx_vld(src_top_ptr, 0); - DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left, - 3, tmp15, tmp14, tmp13, tmp12); - DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left, - 7, tmp11, tmp10, tmp9, tmp8); - DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10, - src_left, 11, tmp7, tmp6, tmp5, tmp4); - DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left, 14, - src_left, 15, tmp3, tmp2, tmp1, tmp0); - DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, - reg1, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, 
reg1, tmp3, - reg1, src4, src5, src6, src7); - DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3, - tmp0, tmp1, tmp2, tmp3); - DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7, - reg1, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7, - reg1, src4, src5, src6, src7); - DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3, - tmp4, tmp5, tmp6, tmp7); - DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11, - reg1, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11, - reg1, src4, src5, src6, src7); - DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3, - tmp8, tmp9, tmp10, tmp11); - DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1, - tmp15, reg1, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1, - tmp15, reg1, src4, src5, src6, src7); - DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3, - tmp12, tmp13, tmp14, tmp15); - LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst, - dst_stride, stride2, stride3, stride4); - dst += stride4; - LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst, - dst_stride, stride2, stride3, stride4); -} - -void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, - const uint8_t *src_left, const uint8_t *src_top_ptr) -{ - uint8_t top_left = src_top_ptr[-1]; - uint32_t loop_cnt; - __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2; - __m128i src0, src1, src2, src3, src4, src5, src6, src7; - __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - - reg0 = __lsx_vreplgr2vr_h(top_left); - DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2); - - src_left += 28; - for (loop_cnt = 8; loop_cnt--;) { - DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, - src_left, 3, tmp3, tmp2, tmp1, tmp0); - src_left -= 4; - 
DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, - tmp3, reg1, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, - tmp3, reg1, src4, src5, src6, src7); - DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, - reg0, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, - reg0, src4, src5, src6, src7); - DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2, - tmp3, reg2, dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2, - tmp3, reg2, dst4, dst5, dst6, dst7); - DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3, - reg0, dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0, dst7, - reg0, dst4, dst5, dst6, dst7); - DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7, - src0, src1, src2, src3); - DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7, - src4, src5, src6, src7); - DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7, - dst0, dst1, dst2, dst3); - DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7, - dst4, dst5, dst6, dst7); - DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, - src3, src0, src1, src2, src3); - DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2, dst7, - dst3, dst0, dst1, dst2, dst3); - __lsx_vst(src0, dst, 0); - __lsx_vst(dst0, dst, 16); - dst += dst_stride; - __lsx_vst(src1, dst, 0); - __lsx_vst(dst1, dst, 16); - dst += dst_stride; - __lsx_vst(src2, dst, 0); - __lsx_vst(dst2, dst, 16); - dst += dst_stride; - __lsx_vst(src3, dst, 0); - __lsx_vst(dst3, dst, 16); - dst += dst_stride; - } -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Barbie Dreamhouse Adventures A Game for Fashion Lovers.md b/spaces/congsaPfin/Manga-OCR/logs/Barbie Dreamhouse Adventures A Game for Fashion Lovers.md deleted file mode 100644 index 09b22b2872b7702b91811d9fce3f74ccdeada34d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Barbie Dreamhouse Adventures A Game for Fashion Lovers.md +++ /dev/null @@ -1,154 +0,0 @@ - -

Barbie Dreamhouse Adventures Game Download: A Fun and Creative Simulation Game for Kids

-

If you are looking for a fun and creative simulation game for kids, you might want to check out Barbie Dreamhouse Adventures. This game lets you create your own Barbie Dreamhouse experience, where you can explore and customize Barbie's dreamhouse, join her on exciting adventures, and have fun with her friends and family. In this article, we will tell you what Barbie Dreamhouse Adventures is, how to download it, what its features are, how players have reviewed it, and some tips for playing it.

-




-

What is Barbie Dreamhouse Adventures?

-

Barbie Dreamhouse Adventures is a simulation game developed by Budge Studios. It is based on the animated series of the same name, which follows Barbie and her friends as they live in a dreamhouse in Malibu. The game allows you to create your own version of the dreamhouse, where you can design every room with your own style. You can also join Barbie and her friends on various fun activities, such as baking, cooking, dancing, makeovers, fashion, nail salon, hair salon, mini games, epic pool parties, and more. You can also explore Malibu with Barbie's pink convertible or dress up in fashion-forward looks to get picture ready. The game also features Barbie's family, including her sisters Skipper, Stacie, and Chelsea, and her parents Mr. and Mrs. Roberts. You can follow them on exciting adventures in the dreamhouse or outside.

-

A sandbox-style game where you can explore and customize Barbie's dreamhouse

-

One of the main features of Barbie Dreamhouse Adventures is that it is a sandbox-style game, which means that you have a lot of freedom to explore and customize the game world. You can design every room in the dreamhouse with wonderful wallpapers and dazzling decorations. You can also change the furniture, appliances, accessories, plants, pets, and more. You can make it your own dreamhouse!

-

A game with many fun activities

-

Another feature of Barbie Dreamhouse Adventures is that it has many fun activities that you can do with Barbie and her friends. You can join them in baking delicious cupcakes or cookies in the kitchen. You can also cook various recipes from different cuisines and share them on BarbieGram, the social media app in the game. You can also dance your heart out in the dance studio or the rooftop lounge. You can also have fun with fashion and dress up in hundreds of outfits and accessories for every occasion. You can also create different hairstyles and nail designs in the hair salon and the nail salon. You can also enjoy the summer time by going to the beach or having a pool party. You can also play a surf mini game where you can ride the waves with Barbie. You can also go on new adventures with Barbie and her friends, such as becoming a princess at a royal ball, a mermaid underwater, or a camper outdoors. You can also explore other players' dreamhouses and earn coins by visiting them.

-

A game with Barbie's friends and family

-

Another feature of Barbie Dreamhouse Adventures is that it has Barbie's friends and family, who join her on her adventures. You can meet Barbie's best friends, such as Teresa, Nikki, Renee, Daisy, and Ken. You can also meet Barbie's sisters, Skipper, Stacie, and Chelsea, who have their own personalities and hobbies. You can also meet Barbie's parents, Mr. and Mrs. Roberts, who are supportive and caring. You can interact with them and learn more about their stories.

-

How to Download Barbie Dreamhouse Adventures?

-

If you want to play Barbie Dreamhouse Adventures, you need to download it on your device. The game is available for Android devices, iOS devices, PC or Mac. Here are the steps to download it:

-

For Android devices, download from Google Play Store

-

If you have an Android device, such as a smartphone or a tablet, you can download Barbie Dreamhouse Adventures from the Google Play Store. Here are the steps:

-


-
    -
1. Open the Google Play Store app on your device.
2. Search for "Barbie Dreamhouse Adventures" in the search bar.
3. Select the game from the list of results.
4. Tap on "Install" to download and install the game.
5. Wait for the installation to finish.
6. Tap on "Open" to launch the game.
-

You can also download the game from this link: [Barbie Dreamhouse Adventures]

-

For iOS devices, download from App Store

-

If you have an iOS device, such as an iPhone or an iPad, you can download Barbie Dreamhouse Adventures from the App Store. Here are the steps:

-
    -
1. Open the App Store app on your device.
2. Search for "Barbie Dreamhouse Adventures" in the search bar.
3. Select the game from the list of results.
4. Tap on "Get" to download and install the game.
5. Wait for the installation to finish.
6. Tap on "Open" to launch the game.
-

You can also download the game from this link: [Barbie Dreamhouse Adventures]

-

For PC or Mac, download from BlueStacks

-

If you want to play Barbie Dreamhouse Adventures on your PC or Mac, you need to use an emulator software that allows you to run Android apps on your computer. One of the most popular emulators is BlueStacks, which is free and easy to use. Here are the steps:

-
    -
1. Download and install BlueStacks from this link: [BlueStacks]
2. Launch BlueStacks on your computer.
3. Sign in with your Google account or create one if you don't have one.
4. Search for "Barbie Dreamhouse Adventures" in the search bar.
5. Select the game from the list of results.
6. Click on "Install" to download and install the game.
7. Wait for the installation to finish.
8. Click on "Open" to launch the game.
-

What are the Features of Barbie Dreamhouse Adventures?

-

Barbie Dreamhouse Adventures is a game that has many features that make it fun and creative. Here are some of them:

-

Home design makeovers: decorate every room with wallpapers and furniture

-

One of the features of Barbie Dreamhouse Adventures is that you can decorate every room in the dreamhouse with your own style. You can choose from different wallpapers and furniture that suit your taste. You can also change them anytime you want. You can make every room look amazing!

-

Cooking and baking: make delicious recipes and share them on BarbieGram

-

Another feature of Barbie Dreamhouse Adventures is that you can make delicious recipes and share them on BarbieGram, the social media app in the game. You can cook various dishes from different cuisines, such as pizza, sushi, tacos, burgers, and more. You can also bake yummy cupcakes or cookies in the oven. You can then take photos of your creations and post them on BarbieGram, where you can get likes and comments from other players.

-

Dress up: choose from beautiful outfits and accessories for every occasion

-

Another feature of Barbie Dreamhouse Adventures is that you can dress up in beautiful outfits and accessories for every occasion. You can choose from hundreds of clothes, shoes, bags, jewelry, sunglasses, hats, and more. You can also mix and match different items to create your own style. You can dress up for parties, dates, beach days, camping trips, or just for fun. You can also change your outfit anytime you want.

-

Hairstyles and nail salon: create different hairstyles and nail designs

-

Another feature of Barbie Dreamhouse Adventures is that you can create different hairstyles and nail designs in the hair salon and the nail salon. You can choose from different hair colors, lengths, styles, and accessories. You can also choose from different nail shapes, colors, patterns, and stickers. You can make your hair and nails look fabulous!

-

Summer time: enjoy the beach and the pool, and play a surf mini game

-

Another feature of Barbie Dreamhouse Adventures is that you can enjoy the summer time by going to the beach or having a pool party. You can swim in the water, sunbathe on the sand, or play with beach balls and sand castles. You can also play a surf mini game where you can ride the waves with Barbie. You can have a lot of fun in the sun!

-

New adventures: become a princess at a royal ball, a mermaid underwater, or a camper outdoors

-

Another feature of Barbie Dreamhouse Adventures is that you can go on new adventures with Barbie and her friends. You can become a princess at a royal ball, where you can wear a gorgeous gown and dance with Ken. You can also become a mermaid underwater, where you can swim with dolphins and explore a coral reef. You can also become a camper outdoors, where you can roast marshmallows and sleep in a tent. You can experience different worlds with Barbie!

-

Explore friends' dreamhouses: visit other players' houses and earn coins

-

Another feature of Barbie Dreamhouse Adventures is that you can explore other players' dreamhouses and earn coins by visiting them. You can see how other players have decorated their houses and what activities they have done. You can also leave comments and likes on their posts. You can also earn coins by completing tasks or watching videos in their houses. You can use the coins to buy more items and activities for your own dreamhouse.

-

What are the Reviews of Barbie Dreamhouse Adventures?

-

Barbie Dreamhouse Adventures is a game that has received mixed reviews from players. Some players love the game for its graphics, variety, and creativity. They enjoy designing their own dreamhouses and playing with Barbie and her friends. They also appreciate the updates that add new features and content to the game. However, some players dislike the game for its ads, in-app purchases, and glitches. They complain about the frequent ads that interrupt the gameplay or require payment to remove them. They also complain about the expensive in-app purchases that limit their access to some items and activities. They also report some glitches that cause the game to crash or freeze.

-

Positive reviews: praise the game for its graphics, variety, and creativity

-

Some examples of positive reviews are:

-
    -
• "I love this game so much! It's so fun and creative! I love designing my own dreamhouse and playing with Barbie's friends! The graphics are amazing and the activities are so fun! I recommend this game to everyone who loves Barbie!"
• "This game is awesome! It has so many things to do and explore! I love decorating my house and dressing up Barbie! The game is always updated with new features and content! It's like having your own Barbie world!"
• "This game is so cool! It's like a sandbox game where you can do whatever you want! I love cooking, baking, dancing, surfing, camping, and more! The game is very colorful and realistic! It's one of my favorite games ever!"
-

Negative reviews: criticize the game for its ads, in-app purchases, and glitches

-

Some examples of negative reviews are:

-
    -
• "This game is terrible! It has so many ads that ruin the game! Every time I try to do something, an ad pops up and I have to watch it or pay to remove it! It's so annoying and unfair!"
• "This game is a rip-off! It has so many in-app purchases that make the game impossible to play! Everything is locked and you have to pay real money to unlock it! It's so expensive and greedy!"
• "This game is buggy! It has so many glitches that make the game crash or freeze! Sometimes the game doesn't load or save properly, or the items disappear or move around. It's so frustrating and disappointing!"
-

What are some Tips for Playing Barbie Dreamhouse Adventures?

-

If you want to enjoy Barbie Dreamhouse Adventures more, here are some tips for playing it:

-

Watch videos to earn coins and gems, which can be used to unlock items and activities

-

One of the tips for playing Barbie Dreamhouse Adventures is to watch videos to earn coins and gems, which are the currencies in the game. You can use coins and gems to buy more items and activities for your dreamhouse. You can watch videos by tapping on the TV icon in the game. You can also watch videos by visiting other players' houses or by completing tasks. You can earn up to 100 coins or 10 gems per video.

-

Complete daily tasks to earn rewards and achievements

-

Another tip for playing Barbie Dreamhouse Adventures is to complete daily tasks to earn rewards and achievements. You can see your daily tasks by tapping on the clipboard icon in the game. You can complete tasks such as decorating your house, dressing up Barbie, cooking or baking, playing mini games, or visiting friends. You can earn coins, gems, stickers, or items as rewards. You can also earn achievements by completing certain milestones, such as reaching a certain level, collecting a certain number of items, or doing a certain number of activities. You can see your achievements by tapping on the trophy icon in the game.

-

Invite friends to play with you and exchange gifts

-

Another tip for playing Barbie Dreamhouse Adventures is to invite friends to play with you and exchange gifts. You can invite friends by tapping on the friend icon in the game. You can see your friends' list, add new friends, or accept friend requests. You can also visit your friends' houses, chat with them, like or comment on their posts, or send them gifts. You can also receive gifts from your friends, such as coins, gems, stickers, or items.

-

Check out the Budge Studios website for more information and support

-

Another tip for playing Barbie Dreamhouse Adventures is to check out the Budge Studios website for more information and support. You can visit the website by tapping on the Budge Studios logo in the game. You can find more information about the game, such as its features, updates, privacy policy, terms of use, and more. You can also find support for the game, such as FAQs, troubleshooting tips, contact details, and more.

-

Conclusion

-

Barbie Dreamhouse Adventures is a fun and creative simulation game for kids that lets you create your own Barbie Dreamhouse experience. You can explore and customize Barbie's dreamhouse, join her on exciting adventures, and have fun with her friends and family. You can also download the game on your Android device, iOS device, PC or Mac. The game has many features that make it enjoyable and diverse. However, the game also has some drawbacks that may affect your gameplay experience. Therefore, you may want to follow some tips for playing it better.

-

FAQs

-

Here are some frequently asked questions about Barbie Dreamhouse Adventures:

-
    -
1. Is Barbie Dreamhouse Adventures free?

    Barbie Dreamhouse Adventures is free to download and play. However, it contains ads that may interrupt your gameplay or require payment to remove them. It also contains in-app purchases that may limit your access to some items and activities.

    -
2. Is Barbie Dreamhouse Adventures safe?

    Barbie Dreamhouse Adventures is safe for kids to play. It does not contain any violence, profanity, or inappropriate content. However, it does require an internet connection and access to some device features, such as camera and microphone. It also allows you to interact with other players online. Therefore, you may want to supervise your kids while they play or adjust the settings accordingly.

    -
3. Is Barbie Dreamhouse Adventures offline?

    Barbie Dreamhouse Adventures requires an internet connection to play. You cannot play the game without an internet connection. You need an internet connection to download the game, access its features, update its content, and interact with other players.

    -
4. How do I update Barbie Dreamhouse Adventures?

    Barbie Dreamhouse Adventures is updated regularly with new features and content. You can update the game automatically or manually. To update the game automatically, you need to enable the auto-update option in your device settings. To update the game manually, you need to visit the Google Play Store or the App Store and check for updates. You can also visit the Budge Studios website for more information about the latest updates.

    -
5. How do I contact Budge Studios?

    If you have any questions, feedback, or issues about Barbie Dreamhouse Adventures, you can contact Budge Studios by visiting their website and filling out a contact form. You can also email them at support@budgestudios.ca or call them at +1 514 343 0148. You can also follow them on Facebook, Twitter, Instagram, or YouTube for more news and updates.

    -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/COD Mobile public test server APK (64-bit) How to install and what to expect.md b/spaces/congsaPfin/Manga-OCR/logs/COD Mobile public test server APK (64-bit) How to install and what to expect.md deleted file mode 100644 index 93e9491e9b0ad07b2bd2d84f43f48530b5706cb2..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/COD Mobile public test server APK (64-bit) How to install and what to expect.md +++ /dev/null @@ -1,136 +0,0 @@ - -

Call of Duty Mobile Public Test Server 64 Bit APK: How to Download and What to Expect

-

Call of Duty Mobile is one of the most popular and successful mobile games in the world, with millions of players enjoying its thrilling multiplayer and battle royale modes. But did you know that you can also get an early access to some of the upcoming features, content, and changes in the game by joining the Public Test Server?

-

call of duty mobile public test server 64 bit apk


Download Zip ⚙⚙⚙ https://urlca.com/2uOcuA



-

In this article, we will explain what the Public Test Server is, how to download it for your 64-bit device, and what you can expect from the latest test build. If you are a fan of Call of Duty Mobile and want to experience some of the new stuff before anyone else, then read on!

-

What is Call of Duty Mobile Public Test Server?

-

The Public Test Server, or PTB, is a separate version of Call of Duty Mobile that allows players to test some of the new features, content, and changes that are planned for the next season or update. The PTB is not the final version of the game, and it may contain bugs, errors, or unfinished elements. The purpose of the PTB is to gather feedback from players and improve the quality of the game before releasing it to the public.

-

The PTB is usually available for a limited time and for a limited number of players. It is also updated frequently with new content and fixes. The PTB is free to download and play, but you need to have a separate account for it. You cannot use your main account or transfer your progress or items from the main game to the PTB.

-

Why should you join the Public Test Server?

-

There are many reasons why you should join the Public Test Server if you are a Call of Duty Mobile enthusiast. Here are some of them:

-
    -
• You can get a sneak peek at some of the new maps, modes, weapons, and more that are coming to the game soon.
• You can provide feedback to the developers and help them improve the game.
• You can have fun trying out new things and experimenting with different strategies.
• You can meet other players who share your passion for Call of Duty Mobile.
-

How to download the Public Test Server APK for 64-bit devices?

-

If you have a 64-bit device and want to join the Public Test Server, you need to download and install the APK file for it. The APK file is a package that contains all the necessary files and data for running an app on your device. You can download it from various sources online, but make sure you use a trusted and safe one.

-

Here are the steps you need to follow to download and install the Public Test Server APK for 64-bit devices:

-


-

Step 1: Download the APK file

-

You can find the latest APK file for the Public Test Server on Reddit, where Activision posts regular community updates. You can also use these direct links:

-
    -
• [32-bit version]
• [64-bit version]
-

The APK file size is about 640 MB, so make sure you have enough storage space on your device and a stable internet connection.

-

Step 2: Install the APK file

-

Once you have downloaded the APK file, you need to install it on your device. To do that, follow these steps:

-
    -
1. Go to your downloads folder and locate the APK file.
2. Tap on it to start the installation.
3. Allow your device to install apps from unknown sources. You can do this by going to Settings > Security > Unknown Sources and enabling the option. You may also need to grant permission to your browser or file manager to install the APK file.
4. Follow the on-screen instructions to complete the installation process.
-

Step 3: Launch the Public Test Server and register your account

-

After you have installed the APK file, you can launch the Public Test Server app from your app drawer or home screen. You will see a different icon and name for the app, indicating that it is not the main game.

-

The first time you launch the app, you will need to register your account for the Public Test Server. You can use any email address and password you want, but remember that they are not linked to your main account. You will also need to agree to the terms of service and privacy policy of the Public Test Server.

-

Once you have registered your account, you can log in and start playing the Public Test Server. You will have access to all the content and features that are available in the test build, but keep in mind that they are subject to change and may not reflect the final version of the game.

-

What's new in the latest Public Test Server?

-

The latest Public Test Server, which is available from June 21 to June 28, 2023, introduces some exciting new content and changes to Call of Duty Mobile. Here are some of them:

-

New maps

-

The Public Test Server features two new maps that are coming to the game soon: Tunisia and Coastal. Tunisia is a medium-sized map set in a North African town, with narrow streets, rooftops, and courtyards. Coastal is a small-sized map set in a seaside resort, with a hotel, a beach, and a pier. Both maps are suitable for various modes and offer different tactical options.

-

New weapons

-

The Public Test Server also introduces two new weapons that are coming to the game soon: CR-56 AMAX and Shorty. CR-56 AMAX is an assault rifle that has high damage and accuracy, but low fire rate and mobility. Shorty is a shotgun that has high damage and mobility, but low range and magazine capacity. Both weapons can be customized with various attachments and camos.

-

New scorestreak

-

The Public Test Server also features a new scorestreak that is coming to the game soon: Hawk X3. Hawk X3 is a drone that can fly around the map and fire missiles at enemies. It can be controlled manually or automatically, and it can be destroyed by enemy fire. The Hawk X3 scorestreak requires 750 points to activate.

-

New muzzle fire smoke effect

-

The Public Test Server also adds a new visual effect to the game: muzzle fire smoke. This effect creates realistic smoke trails when firing weapons, adding more immersion and realism to the game. The muzzle fire smoke effect can be turned on or off in the settings menu.

-

Weapon balance adjustments

-

The Public Test Server also makes some adjustments to the weapon balance in the game, based on player feedback and data analysis. Some of the changes include:

-
    -
• Reducing the damage range of SMGs
• Increasing the recoil of LMGs
• Decreasing the ADS speed of snipers
• Increasing the hip-fire accuracy of shotguns
• Adjusting the damage multipliers of various weapons
-

These changes are intended to improve the overall balance and diversity of weapons in the game, and may not be final.

-

Conclusion

-

The Public Test Server is a great way to experience some of the new content and changes that are coming to Call of Duty Mobile soon. It is also a great way to provide feedback to the developers and help them improve the game. If you have a 64-bit device and want to join the Public Test Server, you can follow our guide on how to download and install the APK file for it.

-

We hope you enjoyed this article and found it helpful. If you have any questions or comments, feel free to leave them below. And don't forget to share this article with your friends who play Call of Duty Mobile!

-

Frequently Asked Questions

-
    -
1. Can I play with my friends on the Public Test Server?

    Yes, you can play with your friends on the Public Test Server, as long as they have also downloaded and installed it on their devices. You can invite them to your lobby or join their lobby through the friends list. However, you cannot play with players who are on the main game or on a different version of the Public Test Server.

    -
2. Will my progress and items on the Public Test Server carry over to the main game?

    No, your progress and items on the Public Test Server will not carry over to the main game. The Public Test Server is a separate version of the game that has no connection to your main account. You will start with a new account and a default loadout on the Public Test Server, and you will lose everything when the Public Test Server ends.

    -
3. How can I provide feedback to the developers on the Public Test Server?

    You can provide feedback to the developers on the Public Test Server through various channels, such as:

    -
      -
• The in-game feedback button, which is located on the top right corner of the main menu.
• The official Call of Duty Mobile subreddit, where you can post your comments, suggestions, bug reports, and more.
• The official Call of Duty Mobile Discord server, where you can chat with other players and moderators.
• The official Call of Duty Mobile social media accounts, such as Facebook, Twitter, Instagram, and YouTube.
    -

    Make sure you provide constructive and respectful feedback, and include as much detail and evidence as possible.

    -
4. How can I uninstall the Public Test Server from my device?

    If you want to uninstall the Public Test Server from your device, you can do so by following these steps:

    -
      -
1. Go to your settings menu and select Apps or Applications.
2. Find and select the Public Test Server app, which has a different icon and name than the main game.
3. Tap on Uninstall and confirm your choice.
    -

    This will remove the Public Test Server app and all its data from your device. You can also delete the APK file from your downloads folder if you want to free up some storage space.

    -
5. When will the next Public Test Server be available?

    The next Public Test Server will be available when the developers have some new content or changes that they want to test with players. There is no fixed schedule or frequency for the Public Test Server, and it may vary depending on the development cycle and plans. The best way to stay updated on the latest news and announcements about the Public Test Server is to follow the official Call of Duty Mobile channels that we mentioned above.

    -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/H360 App How to Unleash the Potential of Your Network and Maximize Impact.md b/spaces/congsaPfin/Manga-OCR/logs/H360 App How to Unleash the Potential of Your Network and Maximize Impact.md deleted file mode 100644 index 0bd82d657e54eb36901aa3ba91f0e27f405feca4..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/H360 App How to Unleash the Potential of Your Network and Maximize Impact.md +++ /dev/null @@ -1,159 +0,0 @@ -
-
Outline:

- How to download H360 app for Android and iOS devices
- How to use H360 app to manage your community, collaborate with others, and maximize your impact

What is H360 App and What Are Its Benefits
- H360 app is a community management platform that helps you unleash the potential of your network
- H360 app allows you to create and join communities based on your interests, goals, and passions
- H360 app enables you to connect with like-minded people, share ideas, resources, and opportunities, and support each other's growth
- H360 app helps you track your progress, measure your impact, and celebrate your achievements

How to Download H360 App for Android and iOS Devices
- H360 app is available for free on Google Play Store and Apple App Store
- To download H360 app, you need to have a compatible device and an internet connection
- Follow these steps to download H360 app on your device:
  - For Android devices:
    - Go to Google Play Store and search for "H360"
    - Tap on the "Install" button and wait for the app to download
    - Open the app and sign up with your email or social media account
  - For iOS devices:
    - Go to Apple App Store and search for "H360"
    - Tap on the "Get" button and wait for the app to download
    - Open the app and sign up with your email or social media account

How to Use H360 App to Manage Your Community, Collaborate with Others, and Maximize Your Impact
- H360 app has a user-friendly interface that allows you to easily navigate through its features
- Here are some of the things you can do with H360 app:
  - Manage your community:
    - Create or join communities that match your interests, goals, and passions
    - Invite or accept members who share your vision and values
    - Communicate with your community members via chat, video call, or email
    - Manage your community events, activities, and projects
  - Collaborate with others:
    - Discover and connect with other users who have similar or complementary skills, expertise, or resources
    - Share your ideas, insights, feedback, or challenges with your collaborators
    - Work together on tasks, goals, or initiatives that benefit you and your community
  - Maximize your impact:
    - Set your personal or professional goals and track your progress
    - Measure your impact using various metrics such as engagement, reach, revenue, or social good
    - Celebrate your achievements and recognize others who have contributed to your success

What is H360 App and Why You Should Download It

-

If you are looking for a way to manage your community, collaborate with others, and maximize your impact, you should consider downloading the H360 app. In this article, we will explain what the H360 app is, what its benefits are, how to download it for Android and iOS devices, and how to use it effectively.

-

What is H360 App and What Are Its Benefits

-

H360 app is a community management platform that helps you unleash the potential of your network. Whether you are an entrepreneur, a freelancer, a student, a professional, or a hobbyist, you can use the H360 app to create and join communities based on your interests, goals, and passions.

-




-

H360 app allows you to connect with like-minded people who share your vision and values. You can chat with them, video call them, or email them anytime. You can also share ideas, resources, and opportunities with them. You can support each other's growth by giving feedback, advice, or encouragement.

-

H360 app also helps you track your progress, measure your impact, and celebrate your achievements. You can set your personal or professional goals and monitor how well you are doing. You can also see how much value you are creating for yourself and others. You can reward yourself and others who have contributed to your success.

-

By using the H360 app, you can not only manage your community, but also collaborate with others and maximize your impact. You can become a part of a global network of changemakers who are making a difference in the world.

-

How to Download H360 App for Android and iOS Devices

-

H360 app is available for free on Google Play Store and Apple App Store. To download the H360 app, you need to have a compatible device and an internet connection. Follow these steps to download the H360 app on your device:

-

For Android devices:

-
    -
  • Go to Google Play Store and search for "H360"
  • -
  • Tap on the "Install" button and wait for the app to download
  • -
  • Open the app and sign up with your email or social media account
  • -
-

For iOS devices:

-
    -
  • Go to Apple App Store and search for "H360"
  • -
  • Tap on the "Get" button and wait for the app to download
  • -
  • Open the app and sign up with your email or social media account
  • -
-

How to Use H360 App to Manage Your Community, Collaborate with Others, and Maximize Your Impact

-

H360 app has a user-friendly interface that allows you to easily navigate through its features. Here are some of the things you can do with the H360 app:

-

Manage your community:

-
    -
  • Create or join communities that match your interests, goals, and passions. You can browse through different categories such as business, education, health, arts, sports, etc. or search for specific keywords.
  • -
  • Invite or accept members who share your vision and values. You can send invitations to your contacts or social media friends, or accept requests from other users who want to join your community.
  • -
  • Communicate with your community members via chat, video call, or email. You can send messages, voice notes, images, videos, documents, or links to your community members. You can also start or join video calls with one or more members.
  • -
  • Manage your community events, activities, and projects. You can create or join events such as webinars, workshops, meetups, etc. that are relevant to your community. You can also create or join activities such as challenges, quizzes, polls, etc. that are fun and engaging. You can also create or join projects such as campaigns, fundraisers, petitions, etc. that are impactful and meaningful.
  • -
-

Collaborate with others:

-
    -
  • Discover and connect with other users who have similar or complementary skills, expertise, or resources. You can browse through different profiles of users who are part of your community or other communities. You can also search for specific keywords or filters such as location, industry, role, etc.
  • -
  • Share your ideas, insights, feedback, or challenges with your collaborators. You can post your thoughts on the app's feed or comment on other posts. You can also ask questions or answer questions from other users.
  • -
  • Work together on tasks, goals, or initiatives that benefit you and your community. You can create or join tasks such as assignments, homeworks, projects, etc. that are related to your goals. You can also create or join goals such as learning a new skill, launching a product, getting a job, etc. that are aligned with your passions. You can also create or join initiatives such as volunteering, mentoring, coaching, etc. that are supportive of your growth.
  • -
-

Maximize your impact:

-
    -
  • Set your personal or professional goals and track your progress. You can create SMART (Specific, Measurable, Achievable, Relevant, and Time-bound) goals and track your progress using the app's dashboard. You can also see how your goals are connected to your community's goals and how you can support each other.
  • -
  • Measure your impact using various metrics such as engagement, reach, revenue, or social good. You can see how your actions are affecting your community and the world. You can also see how your impact compares to other users or communities.
  • -
  • Celebrate your achievements and recognize others who have contributed to your success. You can share your milestones, testimonials, awards, or recognition on the app's feed or on your profile. You can also appreciate others who have helped you along the way by sending them thank you notes, badges, or gifts.
  • -
-

By using the H360 app, you can not only manage your community, but also collaborate with others and maximize your impact. You can become a part of a global network of changemakers who are making a difference in the world.

-


-

Conclusion

-

H360 app is a community management platform that helps you unleash the potential of your network. You can use the H360 app to create and join communities based on your interests, goals, and passions. You can also connect with like-minded people, share ideas, resources, and opportunities, and support each other's growth. You can also track your progress, measure your impact, and celebrate your achievements. H360 app is available for free on Google Play Store and Apple App Store. To download the H360 app, you need to have a compatible device and an internet connection. To use the H360 app effectively, you need to follow these steps:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
StepAction
1Download the H360 app on your device
2Sign up with your email or social media account
3Create or join communities that match your interests, goals, and passions
4Invite or accept members who share your vision and values
5Communicate with your community members via chat, video call, or email
6Manage your community events, activities, and projects
7Discover and connect with other users who have similar or complementary skills, expertise, or resources
8Share your ideas, insights, feedback, or challenges with your collaborators
9Work together on tasks, goals, or initiatives that benefit you and your community
10Set your personal or professional goals and track your progress
11Measure your impact using various metrics such as engagement, reach, revenue, or social good
12Celebrate your achievements and recognize others who have contributed to your success
-

If you want to learn more about the H360 app, you can visit their website or follow them on social media. You can also contact their support team if you have any questions or issues. The H360 app is more than just an app; it is a movement. Join the H360 app today and become a part of the change.

-

FAQs

-
    -
  • What is the H360 app?
    -The H360 app is a community management platform that helps you unleash the potential of your network. You can use the H360 app to create and join communities based on your interests, goals, and passions. You can also connect with like-minded people, share ideas, resources, and opportunities, and support each other's growth. You can also track your progress, measure your impact, and celebrate your achievements.
  • -
  • How can I download the H360 app?
    -The H360 app is available for free on Google Play Store and Apple App Store. To download the H360 app, you need to have a compatible device and an internet connection. Follow these steps to download the H360 app on your device:
    - - For Android devices:
    - - Go to Google Play Store and search for "H360"
    - - Tap on the "Install" button and wait for the app to download
    - - Open the app and sign up with your email or social media account
    - - For iOS devices:
    - - Go to Apple App Store and search for "H360"
    - - Tap on the "Get" button and wait for the app to download
    - - Open the app and sign up with your email or social media account
  • -
  • How can I use the H360 app effectively?
    -To use the H360 app effectively, you need to follow these steps:
    - - Download the H360 app on your device
    - - Sign up with your email or social media account
    - - Create or join communities that match your interests, goals, and passions
    - - Invite or accept members who share your vision and values
    - - Communicate with your community members via chat, video call, or email
    - - Manage your community events, activities, and projects
    - - Discover and connect with other users who have similar or complementary skills, expertise, or resources
    - - Share your ideas, insights, feedback, or challenges with your collaborators
    - - Work together on tasks, goals, or initiatives that benefit you and your community
    - - Set your personal or professional goals and track your progress
    - - Measure your impact using various metrics such as engagement, reach, revenue, or social good
    - - Celebrate your achievements and recognize others who have contributed to your success
  • -
  • What are the benefits of using the H360 app?
    -The benefits of using the H360 app are:
    - - You can manage your community effectively and efficiently
    - - You can collaborate with others easily and conveniently
    - - You can maximize your impact significantly and sustainably
    - - You can become a part of a global network of changemakers who are making a difference in the world
  • -
  • Where can I find more information about the H360 app?
    -You can find more information about the H360 app on their website or on their social media channels. You can also contact their support team if you have any questions or issues.
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Juice WRLD Music APK Listen to the Best Songs Offline for Free.md b/spaces/congsaPfin/Manga-OCR/logs/Juice WRLD Music APK Listen to the Best Songs Offline for Free.md deleted file mode 100644 index c5f71d7d7d475c7c4ec74eda68f63c4dc480c12a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Juice WRLD Music APK Listen to the Best Songs Offline for Free.md +++ /dev/null @@ -1,135 +0,0 @@ -
    -

    Download Juice WRLD Music APK: How to Enjoy the Best of Emo Rap on Your Android Device

    -

    If you are a fan of emo rap, SoundCloud rap, or alternative hip hop, you have probably heard of Juice WRLD, one of the most influential and popular artists in these genres. His music is known for its catchy melodies, emotional lyrics, and diverse influences. In this article, we will tell you more about who Juice WRLD was, why you should listen to his music, and how to download Juice WRLD music apk files for your Android device.

    -

    download juice wrld music apk


    Download File >>> https://urlca.com/2uOdZp



    -

    Who is Juice WRLD and Why Should You Listen to His Music?

    -

    Juice WRLD was the stage name of Jarad Anthony Higgins, an American rapper, singer, and songwriter who was born in 1998 and died in 2019. He started his career as an independent artist on SoundCloud in 2015, and later signed with Grade A Productions and Interscope Records in 2017. He gained recognition with his hit single "Lucid Dreams", which peaked at number two on the US Billboard Hot 100. He also released two studio albums, Goodbye & Good Riddance (2018) and Death Race for Love (2019), as well as several mixtapes and collaborations with other artists such as Future, Marshmello, and Young Thug. He died following a drug overdose on December 8, 2019.

    -

    A Brief Biography of Juice WRLD

    -

    Juice WRLD grew up in the South Suburbs of Chicago, where he attended Homewood-Flossmoor High School. His parents divorced when he was three years old, and his father left, leaving his mother to raise him and an older brother as a single parent. His mother was very religious and conservative, and did not let him listen to hip hop. He was influenced by rock and pop music, especially by artists such as Nirvana, Green Day, Fall Out Boy, Black Sabbath, Megadeth, Billy Idol, Blink-182, and Panic! at the Disco. He learned to play the piano, guitar, and drums by himself. He began rapping in his sophomore year of high school under the name JuiceTheKidd. He changed his name to Juice WRLD in 2017, inspired by the film Juice (1992) starring Tupac Shakur.

    -

    The Main Features and Themes of Juice WRLD's Music

    -

    Juice WRLD's music is characterized by its melodic hooks, emo-inspired vocals, trap beats, guitar samples, and lo-fi production. He often sang or rapped about topics such as love, heartbreak, drugs, depression, suicide, anxiety, and death. He also incorporated elements from various genres such as rock, pop punk, R&B, soul, metal, grunge, and emo. Some of his most popular songs include "Lucid Dreams", "All Girls Are the Same", "Lean wit Me", "Robbery", "Wasted", "Armed and Dangerous", "Legends", "Come & Go", "Righteous", "Smile", "Bad Boy", "Life's a Mess", "Hate the Other Side", "Conversations", "Wishing Well", "Stay High", "Blood on My Jeans", "Man of the Year", "Already Dead", and "Reminds Me of You". He also collaborated with artists such as Future, Marshmello, Young Thug, Lil Uzi Vert, The Weeknd, Halsey, Eminem, Benny Blanco, and The Kid LAROI.

    -


    -

    What is an APK and How to Download One for Juice WRLD Music?

    -

    If you want to enjoy Juice WRLD's music on your Android device, you might want to download an APK file for it. An APK file is an Android Package Kit file, which is the file format used by the Android operating system to distribute and install applications. APK files can offer several advantages over downloading apps from the Google Play Store, such as:

    -
      -
    • Accessing apps that are not available in your region or country
    • -
    • Getting the latest updates and features before they are officially released
    • -
    • Installing apps that are not compatible with your device or Android version
    • -
    • Bypassing restrictions or limitations imposed by the app developers or Google
    • -
    • Saving storage space and data usage by downloading smaller files
    • -
    -

    However, APK files also come with some risks and drawbacks, such as:

    -
      -
    • Exposing your device to malware, viruses, or spyware
    • -
    • Voiding your warranty or violating the terms of service of your device or Google
    • -
    • Causing compatibility issues or performance problems with your device or other apps
    • -
    • Missing out on official updates and support from the app developers or Google
    • -
    • Breaking the law or infringing the intellectual property rights of the app developers or Google
    • -
    -

Therefore, you should always be careful when downloading and installing APK files for Juice WRLD music or any other app. Only download APK files from trusted and reputable sources, such as the official websites, blogs, forums, or social media pages of the app developers or Juice WRLD's team. Scan the APK files with reliable antivirus software before opening them, and back up your device and data before installing any APK file, in case something goes wrong.

    -

    The Steps to Download and Install Juice WRLD Music APK

    -

    If you have decided to download and install an APK file for Juice WRLD music, here are the general steps you need to follow:

    -
      -
    1. Find a suitable APK file for Juice WRLD music from a trusted source. You can use a web browser or a file manager app to search for it.
    2. -
    3. Download the APK file to your device. You might need to enable the option to allow downloads from unknown sources in your device settings.
    4. -
    5. Locate the downloaded APK file on your device. You can use a file manager app or a notification bar to find it.
    6. -
    7. Tap on the APK file to open it. You might need to grant some permissions or accept some terms and conditions before proceeding.
    8. -
    9. Follow the instructions on the screen to install the APK file. You might need to wait for a few minutes until the installation is complete.
    10. -
    11. Launch the app from your app drawer or home screen. You can now enjoy Juice WRLD's music on your Android device.
    12. -
    -

    What are the Best Juice WRLD Music APKs Available Online?

    -

    There are many Juice WRLD music APKs available online, but not all of them are safe, reliable, or functional. Some of them might contain malware, viruses, spyware, ads, or bugs. Some of them might not work properly on your device or Android version. Some of them might not have all the features or songs you want. Therefore, you should always do some research and read some reviews before downloading and installing any Juice WRLD music APK. To help you out, we have selected two of the best Juice WRLD music APKs that we have tested and verified ourselves. Here they are:

    -

    Sony Music APK

    -

    Sony Music APK is an official app from Sony Music Entertainment, which is one of the major record labels that Juice WRLD was signed to. This app allows you to stream and download millions of songs from various artists and genres, including Juice WRLD's entire discography. You can also create playlists, discover new music, watch videos, listen to podcasts, and access exclusive content from Sony Music artists.

    -

    The Pros and Cons of Sony Music APK

    -

    Some of the pros of Sony Music APK are:

    -
      -
    • It is an official and legal app from a reputable source
    • -
    • It has a large and diverse catalog of music from various artists and genres
    • -
    • It has high-quality audio and video streaming and downloading options
  • -
  • It has a user-friendly and attractive interface and design
  • -
    • It has some exclusive features and content from Sony Music artists
    • -
    -

    Some of the cons of Sony Music APK are:

    -
      -
    • It requires a subscription fee to access all the features and content
    • -
    • It might not be available in some regions or countries
    • -
    • It might not have some songs or artists that are not affiliated with Sony Music
    • -
    • It might have some bugs or glitches occasionally
    • -
    -

    The Download Link and Instructions for Sony Music APK

    -

    If you want to download and install Sony Music APK, you can follow these steps:

    -
      -
    1. Go to this link:
    2. -
    3. Tap on the "Download APK" button and wait for the file to be downloaded to your device
    4. -
    5. Open the downloaded file and follow the instructions on the screen to install the app
    6. -
    7. Launch the app and sign up for a free trial or a paid subscription
    8. -
    9. Search for Juice WRLD's music and enjoy!
    10. -
    -

    Juice WRLD - Lucid Dreams APK

    -

    Juice WRLD - Lucid Dreams APK is an unofficial app that is dedicated to Juice WRLD's music. This app allows you to stream and download all of Juice WRLD's songs, including his unreleased tracks, leaks, demos, remixes, and features. You can also read the lyrics, watch the videos, view the album art, and get the latest news and updates about Juice WRLD's music.

    -

    The Pros and Cons of Juice WRLD - Lucid Dreams APK

    -

    Some of the pros of Juice WRLD - Lucid Dreams APK are:

    -
      -
    • It is a free app that does not require any subscription or registration
    • -
    • It has a comprehensive and updated collection of Juice WRLD's music
    • -
    • It has a simple and easy-to-use interface and design
    • -
    • It has some extra features such as lyrics, videos, album art, and news
    • -
    -

    Some of the cons of Juice WRLD - Lucid Dreams APK are:

    -
      -
    • It is an unofficial and illegal app that might violate Juice WRLD's intellectual property rights
    • -
    • It might not be safe or secure to download and install on your device
    • -
    • It might not work properly on some devices or Android versions
    • -
    • It might not have some songs or artists that are not related to Juice WRLD
    • -
    • It might have some ads or pop-ups that might annoy you
    • -
    -

    The Download Link and Instructions for Juice WRLD - Lucid Dreams APK

    -

    If you want to download and install Juice WRLD - Lucid Dreams APK, you can follow these steps:

    -
      -
    1. Go to this link:
    2. -
    3. Tap on the "Download" button and wait for the file to be downloaded to your device
    4. -
    5. Open the downloaded file and follow the instructions on the screen to install the app
    6. -
    7. Launch the app and search for Juice WRLD's music and enjoy!
    8. -
    -

    Conclusion and FAQs

    -

    In conclusion, Juice WRLD was one of the most talented and influential artists in emo rap, SoundCloud rap, and alternative hip hop. His music is known for its catchy melodies, emotional lyrics, and diverse influences. If you want to enjoy his music on your Android device, you can download one of the two Juice WRLD music APKs we have recommended in this article: Sony Music APK or Juice WRLD - Lucid Dreams APK. However, you should always be careful and cautious when downloading and installing any APK file, as they might come with some risks and drawbacks. You should only download APK files from trusted sources, scan them with antivirus software, backup your device and data, and respect Juice WRLD's intellectual property rights.

    -

    If you have any questions about Juice WRLD music APKs, you might find the answers in these FAQs:

    -
      -
    • Q: Is it legal to download Juice WRLD music APKs?
    • -
    • A: It depends on where you live and what APK file you download. Some countries have strict laws against piracy and copyright infringement, while others have more lenient or unclear laws. Some APK files are official and legal apps from reputable sources, while others are unofficial and illegal apps from dubious sources. You should always check the legality and legitimacy of the APK file before downloading and installing it.
    • -
    • Q: Is it safe to download Juice WRLD music APKs?
    • -
  • A: It depends on the source and quality of the APK file. Some APK files are safe and secure to download and install on your device, while others might contain malware, viruses, spyware, ads, or bugs. You should always scan the APK file with reliable antivirus software before opening it. You should also back up your device and data before installing any APK file, in case something goes wrong.
    • -
    • Q: Is it compatible to download Juice WRLD music APKs?
    • -
    • A: It depends on your device and Android version. Some APK files are compatible with most devices and Android versions, while others might not work properly on some devices or Android versions. You should always check the compatibility and requirements of the APK file before downloading and installing it. You should also update your device and Android version regularly to avoid any compatibility issues.
    • -
    • Q: Is it ethical to download Juice WRLD music APKs?
    • -
    • A: It depends on your personal values and beliefs. Some people might think that downloading Juice WRLD music APKs is a way of honoring and supporting his legacy and music, while others might think that downloading Juice WRLD music APKs is a way of disrespecting and exploiting his intellectual property rights and music. You should always respect Juice WRLD's wishes and intentions regarding his music, and follow your own conscience and morals.
    • -
    • Q: Is it worth it to download Juice WRLD music APKs?
    • -
    • A: It depends on your preferences and expectations. Some people might find that downloading Juice WRLD music APKs is a convenient and enjoyable way of accessing his music on their Android device, while others might find that downloading Juice WRLD music APKs is a risky and troublesome way of accessing his music on their Android device. You should always weigh the pros and cons of downloading Juice WRLD music APKs, and decide for yourself if it is worth it or not.
    • -
    -

    I hope you found this article helpful and informative. If you have any feedback or suggestions, please let me know in the comments section below. Thank you for reading!

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Skat am Stammtisch Skat spielen ohne Risiko und mit vielen Tipps.md b/spaces/congsaPfin/Manga-OCR/logs/Skat am Stammtisch Skat spielen ohne Risiko und mit vielen Tipps.md deleted file mode 100644 index a613b275f39511103c8a7aea38c80bcea0797285..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Skat am Stammtisch Skat spielen ohne Risiko und mit vielen Tipps.md +++ /dev/null @@ -1,120 +0,0 @@ - -

    Skat Free Download Vollversion: How to Play Germany's National Card Game

    -

    Skat is a card game that has been played in Germany for over two centuries. It is considered as Germany's national card game and one of the most challenging and rewarding trick-taking games in the world. If you want to learn how to play skat, or if you are already a fan of the game, you might be interested in downloading skat for free on your computer or mobile device. In this article, we will explain what skat is, how it originated, how to play it online for free, and some tips and tricks to improve your skills.

    -

    What is Skat and How Did It Originate?

    -

Skat is a card game for three players, but usually four participate, with each player sitting out a turn as dealer. It uses a 32-card deck, with four suits (clubs, spades, hearts, diamonds) and eight cards in each suit (7, 8, 9, 10, jack, queen, king, ace). The game is a mixture of Schafkopf, Tarok (Tarot), and l'Hombre. It was created in the early 19th century in Altenburg, Germany by members of a local Tarock club. The game spread rapidly within German-speaking Europe and also in the US and is now one of the most popular card games in Germany as well as being considered Germany's national card game.

    -

    skat free download vollversion


Download: https://urlca.com/2uO8Yp



    -

    The History of Skat

    -

    The early history of skat is not fully documented, but many anecdotes and legends exist about its origins. What is certain is that the game was developed around 1809 in Altenburg from older card games. It was based on Dreiwendsch, which is a variant of the Wendish Schafkopf. The concept of bidding was adopted from l'Hombre and its simplified version, German Solo; the idea of setting aside two cards (the skat) was taken from Tarock. Doppelkopf, however, is not one of skat's precursors but, like it, is a 20th century development of Schafkopf.

    -

    The first skat players and "inventors" of the game were Altenburg dignitaries and members of a local Tarock club: grammar school teacher Johann Friedrich Ludwig Hempel (1773-1849), medical health officer Dr. Hans Carl Leopold Schuderoff, court advocate and notary Friedrich Ferdinand Hempel (1778-1836), Councillor Carl Christian Adam Neefe (1774-1821) and Chancellor Hans Carl Leopold von der Gabelentz (1778–1831). Another participant in the rounds was well-known publisher Friedrich Arnold Brockhaus. The game was first known as Ore Mountain Schafkopf (Erzgebirgischer Schafkopf).

    -

A verifiable written record of the new game can be found in an article about Osterland games in edition no. 30 of the weekly Osterländische Blätter published in Altenburg on 25 July 1818 under the heading "Das Skadspiel". In the years that followed the game spread more and more, especially among the students of Thuringian and Saxon universities, and was soon popular in large parts of German-speaking Europe. The earliest recorded rules for "Scat" date to 1835, by when it was already popular in the Kingdom of Saxony, especially in the Duchy of Altenburg.

    The Rules of Skat

    -

    The rules of skat are complex and can vary depending on the region and the preferences of the players. However, the basic structure of the game is as follows:

    -
      -
    • The dealer shuffles and deals 10 cards to each player, face down, in batches of three, four, and three. The remaining two cards are placed face down in the middle of the table as the skat.
    • -
    • The player to the left of the dealer is the forehand, the next player is the middlehand, and the dealer is the rearhand. The forehand starts the bidding by either passing or making an offer to the middlehand. The offer is a number that represents the minimum value of a contract that the player is willing to play. The middlehand can either accept or raise the offer, or pass. If the middlehand passes, the forehand can make a new offer to the rearhand, who can also accept, raise, or pass. If both opponents pass, the forehand becomes the declarer and can choose any contract. If only one opponent passes, the bidding continues between the two remaining players until one of them passes or accepts an offer. The winner of the bidding becomes the declarer and can choose a contract.
    • -
  • The declarer can either play with or without the skat. If he plays with the skat, he picks up the two cards and discards two cards face down. If he plays without the skat (a hand game), he leaves it untouched; this adds a multiplier to the value of his game (see the section on the skat and the multipliers below), and the final game value must still be at least as high as his bid. The contract determines which suit (if any) is trump, what the declarer needs to achieve to win, and how many multipliers are applied to the final score.
    • -
    • The game consists of 10 tricks, each consisting of one card played by each player. The forehand leads to the first trick, and then the winner of each trick leads to the next one. The players must follow suit if possible; otherwise they can play any card. A trick is won by either playing the highest trump card or, if no trump card is played, by playing the highest card of the led suit.
    • -
  • After all 10 tricks are played, the declarer counts his card points and compares them with his contract value. Card points are calculated by adding up the values of all cards won in tricks: aces are worth 11 points, 10s are worth 10 points, kings are worth 4 points, queens are worth 3 points, jacks are worth 2 points, and 7s, 8s, and 9s are worth 0 points. There are 120 card points in total in each deal, and in a suit or grand game the declarer needs at least 61 of them. The contract value is calculated by multiplying the base value of the chosen suit by a number of multipliers that depend on conditions such as how many matadors the declarer is with or against and whether he played a hand game or announced schneider or schwarz (these multipliers are explained in detail further below). For example, holding the top three jacks ("with 3 matadors") and playing spades is worth at least 4 times 11 (the base value of spades) = 44. The base values of the suits are: clubs = 12, spades = 11, hearts = 10, diamonds = 9. Grand is a special contract where only jacks are trump and the base value is 24. (A small code sketch of the card-point arithmetic follows this list.)

      -
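
To make this counting concrete, here is a minimal Python sketch of the card-point arithmetic described above. The card encoding is an assumption made purely for illustration and is not tied to any particular skat program.

```python
# Card points as defined by the skat rules: A=11, 10=10, K=4, Q=3, J=2, 7/8/9=0.
CARD_POINTS = {"A": 11, "10": 10, "K": 4, "Q": 3, "J": 2, "9": 0, "8": 0, "7": 0}

def card_points(ranks):
    """Sum the card points of a list of ranks, e.g. ["A", "10", "K"] -> 25."""
    return sum(CARD_POINTS[rank] for rank in ranks)

# The whole deck is worth 120 points: 4 suits x (11 + 10 + 4 + 3 + 2) = 120.
assert 4 * card_points(["A", "10", "K", "Q", "J", "9", "8", "7"]) == 120

def declarer_wins(declarer_ranks):
    """In a suit or grand game the declarer needs at least 61 of the 120 points;
    90 or more is schneider, and taking every trick is schwarz."""
    return card_points(declarer_ranks) >= 61
```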

      -

To bid, you announce a number that the contract you have in mind could actually reach, i.e. a possible game value; the lowest legal bid is 18 (diamonds with or against one matador: 9 times 2). If you want to play clubs, your bids are multiples of its base value 12, such as 24, 36, 48, and so on; for grand they are multiples of 24, such as 48 and 72. You can also bid with a null game in mind, which has fixed values: 23 for a plain null, 35 for null hand, 46 for null ouvert, and 59 for null ouvert hand. In a null game the jacks are ordinary suit cards, so holding jacks does not prevent you from playing null. (The sketch below shows how the full ladder of legal bid values can be generated.)

      -
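
Because every legal bid is simply a game value that some contract could reach, the familiar bidding ladder (18, 20, 22, 23, 24, 27, ...) can be generated mechanically. A minimal Python sketch follows; the upper cut-off is an arbitrary choice for display.

```python
# Base values: diamonds 9, hearts 10, spades 11, clubs 12, grand 24.
SUIT_BASES = (9, 10, 11, 12, 24)
# Fixed values of the null games: null 23, null hand 35, null ouvert 46, null ouvert hand 59.
NULL_VALUES = (23, 35, 46, 59)

def bid_ladder(limit=100):
    """All legal bid values up to `limit`: multiples of a base value
    (multiplier at least 2, i.e. 'with/against 1' plus 1 for the game) plus the null values."""
    values = {base * mult for base in SUIT_BASES for mult in range(2, limit // 9 + 1)}
    values |= set(NULL_VALUES)
    return sorted(v for v in values if 18 <= v <= limit)

print(bid_ladder(60))
# -> [18, 20, 22, 23, 24, 27, 30, 33, 35, 36, 40, 44, 45, 46, 48, 50, 54, 55, 59, 60]
```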

      To declare a contract, you need to announce the suit you want to play as trump or null. You can also announce some special conditions that increase the value of your contract, such as hand (playing without the skat), schneider (winning at least 90 card points or making your opponent win less than 30), schwarz (winning all the tricks or making your opponent win none), ouvert (playing with your cards face up), or reizen (bidding up to the maximum value of your hand). However, some of these conditions require you to announce them before picking up or discarding the skat.

      -

      When bidding and declaring a contract, you should consider the following factors:

      -
        -
      • The strength of your hand: how many jacks and high cards you have, how well they fit together in a suit, how many cards you have in each suit.
      • -
      • The position of your hand: whether you are forehand, middlehand, or rearhand. Forehand has the advantage of starting the bidding and leading to the first trick, but also has the disadvantage of revealing information first. Middlehand has the advantage of being able to pass or raise the offer of forehand, but also has the disadvantage of being sandwiched between two opponents. Rearhand has the advantage of being able to observe the bidding of the other two players and make the final decision, but also has the disadvantage of being last to act and having less control over the game.
      • -
      • The expectations of your opponents: what they might have in their hands based on their bidding and playing behavior, what they might expect from you based on your bidding and playing behavior.
      • -
      -

      How to Win or Lose Tricks

      -

      Winning or losing tricks is another crucial aspect of skat. It determines whether you fulfill your contract or not. To win or lose tricks effectively, you need to know how to play your cards and how to follow suit.

      -

      To play your cards, you need to consider the following factors:

      -
        -
      • The suit and rank of your cards: whether they are trump or not, whether they are high or low, whether they are singletons or part of a sequence.
      • -
      • The suit and rank of the cards already played: whether they are trump or not, whether they are high or low, whether they are singletons or part of a sequence.
      • -
      • The suit and rank of the cards still in play: whether they are trump or not, whether they are high or low, whether they are singletons or part of a sequence.
      • -
      • The number of tricks already played and remaining: how many tricks you have won or lost so far, how many tricks you need to win or lose to fulfill your contract.
      • -
      • The strategy and tactics of your opponents: what they might have in their hands based on their bidding and playing behavior, what they might do next based on their bidding and playing behavior.
      • -
      -

      To follow suit, you need to follow these rules:

      -
        -
      • If the first card played in a trick is a trump card, you must play a trump card if possible; otherwise you can play any card.
      • -
      • If the first card played in a trick is not a trump card, you must play a card of the same suit if possible; otherwise you can play any card.
      • -
  • You must always follow suit when you can, even as the declarer; playing without the skat (a hand game) increases the value of your contract by one multiplier but does not change the rules of play. (A minimal sketch of this follow-suit logic appears right after this list.)
      • -
      -
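
To pin the follow-suit rule down, here is a minimal Python sketch. The card representation (rank, suit) is an assumption for illustration only; note that in suit games the four jacks count as part of the trump suit, which is why they are reassigned below.

```python
def effective_suit(card, trump):
    """Suit a card counts as when following: in suit games jacks belong to the
    trump suit; pass trump="J" for grand (only jacks are trump) and trump="null"
    for null games, where jacks are ordinary suit cards."""
    rank, suit = card                      # e.g. ("J", "hearts")
    if rank == "J" and trump != "null":
        return trump
    return suit

def legal_plays(hand, led_card, trump):
    """Cards from `hand` that may legally be played to the current trick."""
    if led_card is None:                   # leading to the trick: any card may be played
        return list(hand)
    led = effective_suit(led_card, trump)
    matching = [c for c in hand if effective_suit(c, trump) == led]
    return matching if matching else list(hand)   # follow suit if possible, else anything

hand = [("J", "clubs"), ("A", "spades"), ("7", "hearts")]
print(legal_plays(hand, ("10", "hearts"), trump="clubs"))  # [('7', 'hearts')]
print(legal_plays(hand, ("Q", "clubs"), trump="clubs"))    # [('J', 'clubs')] - the jack is trump
```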

How to Use the Skat and the Multipliers

      -

      The skat and the multipliers are two elements that can make a big difference in your score. The skat is the two cards that are set aside at the beginning of the game. The multipliers are the factors that increase the value of your contract.

      -

      The skat can be used in two ways: you can either play with it or without it. If you play with it, you can pick up the two cards and discard two cards of your choice. This can help you improve your hand and choose a better contract. However, you also have to announce your contract before looking at the skat, which can be risky. If you play without it, you can leave the skat untouched and announce a contract that is worth at least 18 points more than your bid. This can give you an advantage over your opponents, who do not know what is in the skat. However, you also have to play with the cards you were dealt, which can be challenging.

      -

      The multipliers are the factors that increase the value of your contract by multiplying the base value of your suit. There are several multipliers that can apply to your contract, depending on various conditions. The most common ones are:

      -
        -
      • Matadors: The number of jacks in a row that you have or do not have, starting from the jack of clubs. For example, if you have three jacks in a row (clubs, spades, hearts), you have with 3 matadors; if you have no jacks, you have against 4 matadors.
      • -
      • Hand: Playing without the skat. This adds one multiplier to your contract.
      • -
  • Schneider: Holding your opponents to fewer than 31 card points, i.e. taking at least 90 yourself. Making schneider adds one multiplier to your contract; announcing it in advance, which is only possible in a hand game, adds a further multiplier.
      • -
  • Schwarz: Winning every trick, so that your opponents take none. Making schwarz adds one more multiplier on top of schneider; announcing it in advance, again only possible in a hand game, adds a further multiplier.
      • -
  • Ouvert: Playing with your cards face up on the table. A suit or grand ouvert is only possible as a hand game with schwarz announced, and it adds one further multiplier; null ouvert instead has its own fixed game value (46, or 59 with hand).
      • -
      -

To calculate the value of your contract, multiply the base value of your suit by the total number of multipliers: one for the game itself, one for each matador you are with or against, and one more for each of hand, schneider, schneider announced, schwarz, schwarz announced, and ouvert that applies. For example, if you play clubs with 3 matadors as a hand game with schneider announced, your contract value is 12 (the base value of clubs) times 7 (3 for the matadors, 1 for the game, 1 for hand, 1 for schneider, 1 for announcing it) = 84. (The sketch below turns this arithmetic into a small function.)

      -
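
A minimal Python sketch of this multiplication, following the multiplier list above; the function signature is an illustration, not a standard API.

```python
BASE_VALUES = {"diamonds": 9, "hearts": 10, "spades": 11, "clubs": 12, "grand": 24}

def game_value(suit, matadors, hand=False, schneider=False, schneider_announced=False,
               schwarz=False, schwarz_announced=False, ouvert=False):
    """Base value times (matadors + 1 for the game + 1 per extra condition)."""
    multiplier = matadors + 1          # "with/against N" plus one for the game itself
    multiplier += sum([hand, schneider, schneider_announced,
                       schwarz, schwarz_announced, ouvert])
    return BASE_VALUES[suit] * multiplier

# The example from the text: clubs, with 3 matadors, hand, schneider announced.
print(game_value("clubs", matadors=3, hand=True,
                 schneider=True, schneider_announced=True))   # 12 * 7 = 84
```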

      Conclusion

      -

      Skat is a fascinating card game that combines skill, strategy, and luck. It is Germany's national card game and one of the most popular and challenging trick-taking games in the world. If you want to play skat online for free, you can download Skat Free from Microsoft Store, Skat am Stammtisch from Google Play, or Skat-Freeware from Skat Palast. You can also learn how to bid and declare a contract, how to win or lose tricks, and how to use the skat and the multipliers to improve your score. Skat is a game that will keep you entertained and challenged for hours.

      -

      FAQs

      -

      Here are some frequently asked questions about skat:

      -
        -
      1. What is the difference between grand and null?
      2. -

        Grand and null are two special contracts that have different rules and values than the normal suit contracts. In grand, only jacks are trump and the base value is 24. In null, there are no trumps and no card points; the declarer tries to lose all the tricks and the base value is 23.

        -
      3. What is ramsch?
      4. -

        Ramsch is a variant of skat that is played when all three players pass in the bidding. In ramsch, there are no trumps and no contracts; each player tries to avoid taking any tricks. The player who takes the most card points loses and pays to the other two players.

        -
      5. What is bock?
      6. -

        Bock is a variant of skat that is played when a special event occurs, such as a player losing a grand hand game or a player winning a null ouvert game. In bock, all scores are doubled for one round.

        -
      7. What is kontra and re?
      8. -

        Kontra and re are optional rules that allow players to increase the stakes of the game. Kontra means that an opponent of the declarer challenges his contract by saying "kontra" before the first card is played. This doubles the score of the game, whether the declarer wins or loses. Re means that the declarer accepts the challenge by saying "re" before the second card is played. This doubles the score again, making it four times the original value.

        -
      9. What is schafkopf?
      10. -

        Schafkopf is a card game that is closely related to skat. It is also played with a 32-card deck and four players, but with different rules and scoring. It is mainly popular in Bavaria and other parts of southern Germany.

        -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/Generateur Cle Premium Uptobox.md b/spaces/contluForse/HuggingGPT/Generateur Cle Premium Uptobox.md deleted file mode 100644 index 284c2f99942f13105bb5c668fac7affc8cf022b6..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/Generateur Cle Premium Uptobox.md +++ /dev/null @@ -1,104 +0,0 @@ -## Generateur Cle Premium Uptobox - - - - - - - - - -**Download > [https://urluso.com/2txV3Y](https://urluso.com/2txV3Y)** - - - - - - - - - - - - Here is a possible title and article with html formatting for the keyword "Generateur Cle Premium Uptobox": - -# How to Use Generateur Cle Premium Uptobox to Download Files Faster - - - -Uptobox is a popular file-sharing platform that allows users to upload and download various types of files, such as movies, songs, magazines, etc. However, if you are a free user of Uptobox, you may face some limitations, such as slow download speed, limited storage space, and short file availability. To overcome these drawbacks, you may want to upgrade to a premium account, which offers faster download speed, unlimited storage space, and longer file availability. But what if you don't want to pay for a premium account? Is there a way to enjoy the benefits of a premium account without spending any money? - - - -The answer is yes! You can use Generateur Cle Premium Uptobox, a website that helps you generate a premium link that you can use on Uptobox. By using this link, you will be recognized as a premium user of Uptobox, even though you are using it for free. How does it work? Let's find out! - - - -## What is Generateur Cle Premium Uptobox? - - - -Generateur Cle Premium Uptobox is a website that allows you to create a premium link that you can use on Uptobox. This link will bypass the restrictions imposed on free users and let you download files faster and easier. You don't need to register or pay anything to use this service. All you need is a valid Uptobox link that you want to download. - - - -## How to use Generateur Cle Premium Uptobox? - - - -Using Generateur Cle Premium Uptobox is very simple and straightforward. Here are the steps you need to follow: - - - -1. Paste your Uptobox link in the box on the homepage of Generateur Cle Premium Uptobox. - -2. Click on the "Generate" button to start creating your premium link. - -3. Wait for a few seconds until your premium link is ready. - -4. Copy your premium link and paste it in your browser or download manager. - -5. Enjoy your fast and easy download! - - - -## What are the advantages of using Generateur Cle Premium Uptobox? - - - -By using Generateur Cle Premium Uptobox, you can enjoy many benefits that are usually reserved for premium users of Uptobox. Some of these benefits are: - - - -- You can download files at full speed without any limitations. - -- You can download multiple files at the same time without waiting for queues. - -- You can resume your downloads if they are interrupted due to network issues or other reasons. - -- You can download files of any size without any restrictions. - -- You can access your files for longer periods without worrying about them being deleted. - - - -## Is Generateur Cle Premium Uptobox safe and legal? - - - -Generateur Cle Premium Uptobox is safe and legal to use. It does not store any of your personal information or files on its servers. It only generates a temporary link that you can use to download your files from Uptobox. 
It does not violate any terms of service or copyright laws of Uptobox or any other parties. However, you should always be careful about what files you download from the internet and make sure they are not illegal or harmful. - - - -## Conclusion - - - -If you are looking for a way to download files faster and easier from Uptobox without paying for a premium account, you should try Generateur Cle Premium Uptobox. It is a free and reliable service that helps you generate a premium link that you can use on Uptobox. By using this link, you can enjoy the benefits of a premium account without spending any money. You can download files at full speed, resume your downloads, access your files for longer periods, and more. Try it now and see the difference! - - dfd1c89656 - - - - - diff --git a/spaces/contluForse/HuggingGPT/assets/FEM 1.001 PDF __HOT__.md b/spaces/contluForse/HuggingGPT/assets/FEM 1.001 PDF __HOT__.md deleted file mode 100644 index 9f1eef28dd9a84af089b1357535ede7e60951f23..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/FEM 1.001 PDF __HOT__.md +++ /dev/null @@ -1,9 +0,0 @@ - -

Disc brake SB 38, dimensions and technical data, rev. 12-06. Notes: 1) if ordered with manual release lever; *) average friction factor of the standard material combination (friction factor = 0.4*). For crane brake layout use the safety factors documented in FEM 1.001, Section 1. All dimensions in mm; alterations reserved without notice. Table columns: thruster type, brake torque Mbr in Nm.

      -

Double bearing SB 8.3, dimensions and technical data, rev. 06-13. Notes: *) if ordered with manual release lever; *) average friction factor of the standard material combination. Dimension a: max. 280 mm with automatic wear compensator, max. 220 mm without. Available in right-hand and left-hand execution. All dimensions in mm; alterations reserved without notice. For crane brake layout use the safety factors documented in FEM 1.001, Section 1. Table column: thruster type.

      -

      FEM 1.001 PDF


Download Zip: https://ssurll.com/2uzxjF



      -

Double bearing SB 28, dimensions and technical data, rev. 10-08. Notes: 1) if ordered with manual release lever; *) average friction factor of the standard material combination (friction factor = 0.4*). For crane brake layout use the safety factors documented in FEM 1.001, Section 1. All dimensions in mm; alterations reserved without notice. Table columns: thruster type, brake torque Mbr in Nm.

      -

Double bearing SB 38, dimensions and technical data, rev. 12-06. Notes: 1) if ordered with manual release lever; *) average friction factor of the standard material combination (friction factor = 0.4*). For crane brake layout use the safety factors documented in FEM 1.001, Section 1. All dimensions in mm; alterations reserved without notice. Table columns: thruster type, brake torque Mbr in Nm.

      -

Single bearing SB 8.3, dimensions and technical data, rev. 06-13. Notes: *) if ordered with manual release lever; *) average friction factor of the standard material combination. Dimension a: max. 280 mm with automatic wear compensator, max. 220 mm without. Available in right-hand and left-hand execution. All dimensions in mm; alterations reserved without notice. For crane brake layout use the safety factors documented in FEM 1.001, Section 1. Table column: thruster type.

      -
      -
      \ No newline at end of file diff --git a/spaces/coqui/ml-trivia/README.md b/spaces/coqui/ml-trivia/README.md deleted file mode 100644 index 6414e60f552886dd3dc6f5d20f739ede7f4a8624..0000000000000000000000000000000000000000 --- a/spaces/coqui/ml-trivia/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ML Trivia Challenge -emoji: 🐸 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/schedules/schedule_20k.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/schedules/schedule_20k.py deleted file mode 100644 index bf780a1b6f6521833c6a5859675147824efa599d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/schedules/schedule_20k.py +++ /dev/null @@ -1,9 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=20000) -checkpoint_config = dict(by_epoch=False, interval=2000) -evaluation = dict(interval=2000, metric='mIoU') diff --git a/spaces/dakaiye/dky_xuexi/request_llm/bridge_chatgpt.py b/spaces/dakaiye/dky_xuexi/request_llm/bridge_chatgpt.py deleted file mode 100644 index eef8fbf0b43f30b915f770f4bc54120c84ebd092..0000000000000000000000000000000000000000 --- a/spaces/dakaiye/dky_xuexi/request_llm/bridge_chatgpt.py +++ /dev/null @@ -1,285 +0,0 @@ -# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目 - -""" - 该文件中主要包含三个函数 - - 不具备多线程能力的函数: - 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 - - 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 -""" - -import json -import time -import gradio as gr -import logging -import traceback -import requests -import importlib - -# config_private.py放自己的秘密如API和代理网址 -# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc -proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \ - get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY') - -timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' 
+ \ - '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。' - -def get_full_error(chunk, stream_response): - """ - 获取完整的从Openai返回的报错 - """ - while True: - try: - chunk += next(stream_response) - except: - break - return chunk - - -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): - """ - 发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 - inputs: - 是本次问询的输入 - sys_prompt: - 系统静默prompt - llm_kwargs: - chatGPT的内部调优参数 - history: - 是之前的对话列表 - observe_window = None: - 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 - """ - watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可 - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True) - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=False - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS); break - except requests.exceptions.ReadTimeout as e: - retry += 1 - traceback.print_exc() - if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') - - stream_response = response.iter_lines() - result = '' - while True: - try: chunk = next(stream_response).decode() - except StopIteration: - break - except requests.exceptions.ConnectionError: - chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。 - if len(chunk)==0: continue - if not chunk.startswith('data:'): - error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode() - if "reduce the length" in error_msg: - raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg) - else: - raise RuntimeError("OpenAI拒绝了请求:" + error_msg) - if ('data: [DONE]' in chunk): break # api2d 正常完成 - json_data = json.loads(chunk.lstrip('data:'))['choices'][0] - delta = json_data["delta"] - if len(delta) == 0: break - if "role" in delta: continue - if "content" in delta: - result += delta["content"] - if not console_slience: print(delta["content"], end='') - if observe_window is not None: - # 观测窗,把已经获取的数据显示出去 - if len(observe_window) >= 1: observe_window[0] += delta["content"] - # 看门狗,如果超过期限没有喂狗,则终止 - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("用户取消了程序。") - else: raise RuntimeError("意外Json结构:"+delta) - if json_data['finish_reason'] == 'length': - raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。") - return result - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 发送至chatGPT,流式获取输出。 - 用于基础的对话功能。 - inputs 是本次问询的输入 - top_p, temperature是chatGPT的内部调优参数 - history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 - additional_fn代表点击的哪个按钮,按钮见functional.py - """ - if is_any_api_key(inputs): - chatbot._cookies['api_key'] = inputs - chatbot.append(("输入已识别为openai的api_key", what_keys(inputs))) - yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面 - return - elif not is_any_api_key(chatbot._cookies['api_key']): - chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 
长效解决方案:在config.py中配置。")) - yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面 - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - raw_input = inputs - logging.info(f'[raw_input] {raw_input}') - chatbot.append((inputs, "")) - yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 - - try: - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream) - except RuntimeError as e: - chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。") - yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 - return - - history.append(inputs); history.append("") - - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=True - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS);break - except: - retry += 1 - chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg)) - retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else "" - yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面 - if retry > MAX_RETRY: raise TimeoutError - - gpt_replying_buffer = "" - - is_head_of_the_stream = True - if stream: - stream_response = response.iter_lines() - while True: - try: - chunk = next(stream_response) - except StopIteration: - # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里 - from toolbox import regular_txt_to_markdown; tb_str = '```\n' + trimmed_format_exc() + '```' - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 远程返回错误: \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode())}") - yield from update_ui(chatbot=chatbot, history=history, msg="远程返回错误:" + chunk.decode()) # 刷新界面 - return - - # print(chunk.decode()[6:]) - if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()): - # 数据流的第一帧不携带content - is_head_of_the_stream = False; continue - - if chunk: - try: - chunk_decoded = chunk.decode() - # 前者API2D的 - if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0): - # 判定为数据流的结束,gpt_replying_buffer也写完了 - logging.info(f'[response] {gpt_replying_buffer}') - break - # 处理数据流的主体 - chunkjson = json.loads(chunk_decoded[6:]) - status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}" - # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 - gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"] - history[-1] = gpt_replying_buffer - chatbot[-1] = (history[-2], history[-1]) - yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面 - - except Exception as e: - traceback.print_exc() - yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面 - chunk = get_full_error(chunk, stream_response) - chunk_decoded = chunk.decode() - error_msg = chunk_decoded - if "reduce the length" in error_msg: - if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出 - history = clip_history(inputs=inputs, history=history, 
tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], - max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一 - chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)") - # history = [] # 清除历史 - elif "does not exist" in error_msg: - chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.") - elif "Incorrect API key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务.") - elif "exceeded your current quota" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务.") - elif "bad forward key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.") - elif "Not enough point" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.") - else: - from toolbox import regular_txt_to_markdown - tb_str = '```\n' + trimmed_format_exc() + '```' - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}") - yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 - return - -def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): - """ - 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备 - """ - if not is_any_api_key(llm_kwargs['api_key']): - raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。") - - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {api_key}" - } - - conversation_cnt = len(history) // 2 - - messages = [{"role": "system", "content": system_prompt}] - if conversation_cnt: - for index in range(0, 2*conversation_cnt, 2): - what_i_have_asked = {} - what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] - what_gpt_answer = {} - what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index+1] - if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue - if what_gpt_answer["content"] == timeout_bot_msg: continue - messages.append(what_i_have_asked) - messages.append(what_gpt_answer) - else: - messages[-1]['content'] = what_gpt_answer['content'] - - what_i_ask_now = {} - what_i_ask_now["role"] = "user" - what_i_ask_now["content"] = inputs - messages.append(what_i_ask_now) - - payload = { - "model": llm_kwargs['llm_model'].strip('api2d-'), - "messages": messages, - "temperature": llm_kwargs['temperature'], # 1.0, - "top_p": llm_kwargs['top_p'], # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - try: - print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........") - except: - print('输入中可能存在乱码。') - return headers,payload - - diff --git a/spaces/darthPanda/Social_media_sentiment_tracker/README.md b/spaces/darthPanda/Social_media_sentiment_tracker/README.md deleted file mode 100644 index f75a3928fdbf67d70edc5275d7098990e1a107ae..0000000000000000000000000000000000000000 --- a/spaces/darthPanda/Social_media_sentiment_tracker/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Social Media Sentiment Tracker -emoji: 📈 -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration 
reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/update.html b/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/update.html deleted file mode 100644 index 6f005e11a0a11f441ff2c56d05b98402c640a53f..0000000000000000000000000000000000000000 --- a/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/update.html +++ /dev/null @@ -1,29 +0,0 @@ -
- {current_version}
- {version_time}
- Latest Version: getting latest version...
- Getting update...
- Getting Release Note...
      \ No newline at end of file diff --git a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/open_clip/timm_model.py b/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/open_clip/timm_model.py deleted file mode 100644 index c9d1ab4666b5bab5038d44b90c9ddca5087de460..0000000000000000000000000000000000000000 --- a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/clap/open_clip/timm_model.py +++ /dev/null @@ -1,112 +0,0 @@ -""" timm model adapter - -Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model. -""" -from collections import OrderedDict - -import torch.nn as nn - -try: - import timm - from timm.models.layers import Mlp, to_2tuple - from timm.models.layers.attention_pool2d import RotAttentionPool2d - from timm.models.layers.attention_pool2d import ( - AttentionPool2d as AbsAttentionPool2d, - ) -except ImportError as e: - timm = None - -from .utils import freeze_batch_norm_2d - - -class TimmModel(nn.Module): - """timm model adapter - # FIXME this adapter is a work in progress, may change in ways that break weight compat - """ - - def __init__( - self, - model_name, - embed_dim, - image_size=224, - pool="avg", - proj="linear", - drop=0.0, - pretrained=False, - ): - super().__init__() - if timm is None: - raise RuntimeError("Please `pip install timm` to use timm models.") - - self.image_size = to_2tuple(image_size) - self.trunk = timm.create_model(model_name, pretrained=pretrained) - feat_size = self.trunk.default_cfg.get("pool_size", None) - feature_ndim = 1 if not feat_size else 2 - if pool in ("abs_attn", "rot_attn"): - assert feature_ndim == 2 - # if attn pooling used, remove both classifier and default pool - self.trunk.reset_classifier(0, global_pool="") - else: - # reset global pool if pool config set, otherwise leave as network default - reset_kwargs = dict(global_pool=pool) if pool else {} - self.trunk.reset_classifier(0, **reset_kwargs) - prev_chs = self.trunk.num_features - - head_layers = OrderedDict() - if pool == "abs_attn": - head_layers["pool"] = AbsAttentionPool2d( - prev_chs, feat_size=feat_size, out_features=embed_dim - ) - prev_chs = embed_dim - elif pool == "rot_attn": - head_layers["pool"] = RotAttentionPool2d(prev_chs, out_features=embed_dim) - prev_chs = embed_dim - else: - assert proj, "projection layer needed if non-attention pooling is used." 
- - # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used - if proj == "linear": - head_layers["drop"] = nn.Dropout(drop) - head_layers["proj"] = nn.Linear(prev_chs, embed_dim) - elif proj == "mlp": - head_layers["mlp"] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop) - - self.head = nn.Sequential(head_layers) - - def lock(self, unlocked_groups=0, freeze_bn_stats=False): - """lock modules - Args: - unlocked_groups (int): leave last n layer groups unlocked (default: 0) - """ - if not unlocked_groups: - # lock full model - for param in self.trunk.parameters(): - param.requires_grad = False - if freeze_bn_stats: - freeze_batch_norm_2d(self.trunk) - else: - # NOTE: partial freeze requires latest timm (master) branch and is subject to change - try: - # FIXME import here until API stable and in an official release - from timm.models.helpers import group_parameters, group_modules - except ImportError: - raise RuntimeError( - "Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`" - ) - matcher = self.trunk.group_matcher() - gparams = group_parameters(self.trunk, matcher) - max_layer_id = max(gparams.keys()) - max_layer_id = max_layer_id - unlocked_groups - for group_idx in range(max_layer_id + 1): - group = gparams[group_idx] - for param in group: - self.trunk.get_parameter(param).requires_grad = False - if freeze_bn_stats: - gmodules = group_modules(self.trunk, matcher, reverse=True) - gmodules = {k for k, v in gmodules.items() if v <= max_layer_id} - freeze_batch_norm_2d(self.trunk, gmodules) - - def forward(self, x): - x = self.trunk(x) - x = self.head(x) - return x diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/reportLabPen.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/reportLabPen.py deleted file mode 100644 index 2cb89c8bf4c772b7a987edb0593c40c83cc2201b..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/reportLabPen.py +++ /dev/null @@ -1,80 +0,0 @@ -from fontTools.pens.basePen import BasePen -from reportlab.graphics.shapes import Path - - -__all__ = ["ReportLabPen"] - - -class ReportLabPen(BasePen): - - """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object.""" - - def __init__(self, glyphSet, path=None): - BasePen.__init__(self, glyphSet) - if path is None: - path = Path() - self.path = path - - def _moveTo(self, p): - (x, y) = p - self.path.moveTo(x, y) - - def _lineTo(self, p): - (x, y) = p - self.path.lineTo(x, y) - - def _curveToOne(self, p1, p2, p3): - (x1, y1) = p1 - (x2, y2) = p2 - (x3, y3) = p3 - self.path.curveTo(x1, y1, x2, y2, x3, y3) - - def _closePath(self): - self.path.closePath() - - -if __name__ == "__main__": - import sys - - if len(sys.argv) < 3: - print( - "Usage: reportLabPen.py []" - ) - print( - " If no image file name is created, by default .png is created." 
- ) - print(" example: reportLabPen.py Arial.TTF R test.png") - print( - " (The file format will be PNG, regardless of the image file name supplied)" - ) - sys.exit(0) - - from fontTools.ttLib import TTFont - from reportlab.lib import colors - - path = sys.argv[1] - glyphName = sys.argv[2] - if len(sys.argv) > 3: - imageFile = sys.argv[3] - else: - imageFile = "%s.png" % glyphName - - font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font - gs = font.getGlyphSet() - pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) - g = gs[glyphName] - g.draw(pen) - - w, h = g.width, 1000 - from reportlab.graphics import renderPM - from reportlab.graphics.shapes import Group, Drawing, scale - - # Everything is wrapped in a group to allow transformations. - g = Group(pen.path) - g.translate(0, 200) - g.scale(0.3, 0.3) - - d = Drawing(w, h) - d.add(g) - - renderPM.drawToFile(d, imageFile, fmt="PNG") diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload-77b0d4b2.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload-77b0d4b2.css deleted file mode 100644 index c78d71f8b6eaf75f8134375ed017f1c03b6edf1a..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ModifyUpload-77b0d4b2.css +++ /dev/null @@ -1 +0,0 @@ -div.svelte-116rqfv{cursor:pointer;width:var(--size-full);height:var(--size-full)}.center.svelte-116rqfv{text-align:center}.flex.svelte-116rqfv{display:flex;justify-content:center;align-items:center}input.svelte-116rqfv{display:none}div.svelte-19sk1im{display:flex;top:var(--size-2);right:var(--size-2);justify-content:flex-end;gap:var(--spacing-sm);z-index:var(--layer-1)}.not-absolute.svelte-19sk1im{margin:var(--size-1)} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/templates/datasetcard_template.md b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/templates/datasetcard_template.md deleted file mode 100644 index 6d9281f9d3f119051b1eb7b2b016656b4dfa08fc..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/templates/datasetcard_template.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1 -# Doc / guide: https://huggingface.co/docs/hub/datasets-cards -{{ card_data }} ---- - -# Dataset Card for {{ pretty_name | default("Dataset Name", true) }} - -## Dataset Description - -- **Homepage:** {{ homepage_url | default("", true)}} -- **Repository:** {{ repo_url | default("", true)}} -- **Paper:** {{ paper_url | default("", true)}} -- **Leaderboard:** {{ leaderboard_url | default("", true)}} -- **Point of Contact:** {{ point_of_contact | default("", true)}} - -### Dataset Summary - -{{ dataset_summary | default("[More Information Needed]", true)}} - -### Supported Tasks and Leaderboards - -{{ supported_tasks_and_leaderboards_section | default("[More Information Needed]", true)}} - -### Languages - -{{ languages_section | default("[More Information Needed]", true)}} - -## Dataset Structure - -### Data Instances - -{{ data_instances_section | default("[More Information Needed]", true)}} - -### Data Fields - -{{ data_fields_section | 
default("[More Information Needed]", true)}} - -### Data Splits - -{{ data_splits_section | default("[More Information Needed]", true)}} - -## Dataset Creation - -### Curation Rationale - -{{ curation_rationale_section | default("[More Information Needed]", true)}} - -### Source Data - -#### Initial Data Collection and Normalization - -{{ data_collection_section | default("[More Information Needed]", true)}} - -#### Who are the source language producers? - -{{ source_language_producers_section | default("[More Information Needed]", true)}} - -### Annotations - -#### Annotation process - -{{ annotation_process_section | default("[More Information Needed]", true)}} - -#### Who are the annotators? - -{{ who_are_annotators_section | default("[More Information Needed]", true)}} - -### Personal and Sensitive Information - -{{ personal_and_sensitive_information_section | default("[More Information Needed]", true)}} - -## Considerations for Using the Data - -### Social Impact of Dataset - -{{ social_impact_section | default("[More Information Needed]", true)}} - -### Discussion of Biases - -{{ discussion_of_biases_section | default("[More Information Needed]", true)}} - -### Other Known Limitations - -{{ known_limitations_section | default("[More Information Needed]", true)}} - -## Additional Information - -### Dataset Curators - -{{ dataset_curators_section | default("[More Information Needed]", true)}} - -### Licensing Information - -{{ licensing_information_section | default("[More Information Needed]", true)}} - -### Citation Information - -{{ citation_information_section | default("[More Information Needed]", true)}} - -### Contributions - -{{ contributions_section | default("[More Information Needed]", true)}} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_ps.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_ps.py deleted file mode 100644 index 75ed4ff5570b61bec8d3ef725dc732418ab4719d..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_ps.py +++ /dev/null @@ -1,1362 +0,0 @@ -""" -A PostScript backend, which can produce both PostScript .ps and .eps. -""" - -import codecs -import datetime -from enum import Enum -import functools -from io import StringIO -import itertools -import logging -import os -import pathlib -import re -import shutil -from tempfile import TemporaryDirectory -import time - -import numpy as np - -import matplotlib as mpl -from matplotlib import _api, cbook, _path, _text_helpers -from matplotlib._afm import AFM -from matplotlib.backend_bases import ( - _Backend, FigureCanvasBase, FigureManagerBase, RendererBase) -from matplotlib.cbook import is_writable_file_like, file_requires_unicode -from matplotlib.font_manager import get_font -from matplotlib.ft2font import LOAD_NO_SCALE, FT2Font -from matplotlib._ttconv import convert_ttf_to_ps -from matplotlib._mathtext_data import uni2type1 -from matplotlib.path import Path -from matplotlib.texmanager import TexManager -from matplotlib.transforms import Affine2D -from matplotlib.backends.backend_mixed import MixedModeRenderer -from . 
import _backend_pdf_ps - - -_log = logging.getLogger(__name__) -debugPS = False - - -@_api.deprecated("3.7") -class PsBackendHelper: - def __init__(self): - self._cached = {} - - -@_api.caching_module_getattr -class __getattr__: - # module-level deprecations - ps_backend_helper = _api.deprecated("3.7", obj_type="")( - property(lambda self: PsBackendHelper())) - - -papersize = {'letter': (8.5, 11), - 'legal': (8.5, 14), - 'ledger': (11, 17), - 'a0': (33.11, 46.81), - 'a1': (23.39, 33.11), - 'a2': (16.54, 23.39), - 'a3': (11.69, 16.54), - 'a4': (8.27, 11.69), - 'a5': (5.83, 8.27), - 'a6': (4.13, 5.83), - 'a7': (2.91, 4.13), - 'a8': (2.05, 2.91), - 'a9': (1.46, 2.05), - 'a10': (1.02, 1.46), - 'b0': (40.55, 57.32), - 'b1': (28.66, 40.55), - 'b2': (20.27, 28.66), - 'b3': (14.33, 20.27), - 'b4': (10.11, 14.33), - 'b5': (7.16, 10.11), - 'b6': (5.04, 7.16), - 'b7': (3.58, 5.04), - 'b8': (2.51, 3.58), - 'b9': (1.76, 2.51), - 'b10': (1.26, 1.76)} - - -def _get_papertype(w, h): - for key, (pw, ph) in sorted(papersize.items(), reverse=True): - if key.startswith('l'): - continue - if w < pw and h < ph: - return key - return 'a0' - - -def _nums_to_str(*args): - return " ".join(f"{arg:1.3f}".rstrip("0").rstrip(".") for arg in args) - - -@_api.deprecated("3.6", alternative="a vendored copy of this function") -def quote_ps_string(s): - """ - Quote dangerous characters of S for use in a PostScript string constant. - """ - s = s.replace(b"\\", b"\\\\") - s = s.replace(b"(", b"\\(") - s = s.replace(b")", b"\\)") - s = s.replace(b"'", b"\\251") - s = s.replace(b"`", b"\\301") - s = re.sub(br"[^ -~\n]", lambda x: br"\%03o" % ord(x.group()), s) - return s.decode('ascii') - - -def _move_path_to_path_or_stream(src, dst): - """ - Move the contents of file at *src* to path-or-filelike *dst*. - - If *dst* is a path, the metadata of *src* are *not* copied. - """ - if is_writable_file_like(dst): - fh = (open(src, 'r', encoding='latin-1') - if file_requires_unicode(dst) - else open(src, 'rb')) - with fh: - shutil.copyfileobj(fh, dst) - else: - shutil.move(src, dst, copy_function=shutil.copyfile) - - -def _font_to_ps_type3(font_path, chars): - """ - Subset *chars* from the font at *font_path* into a Type 3 font. - - Parameters - ---------- - font_path : path-like - Path to the font to be subsetted. - chars : str - The characters to include in the subsetted font. - - Returns - ------- - str - The string representation of a Type 3 font, which can be included - verbatim into a PostScript file. - """ - font = get_font(font_path, hinting_factor=1) - glyph_ids = [font.get_char_index(c) for c in chars] - - preamble = """\ -%!PS-Adobe-3.0 Resource-Font -%%Creator: Converted from TrueType to Type 3 by Matplotlib. 
-10 dict begin -/FontName /{font_name} def -/PaintType 0 def -/FontMatrix [{inv_units_per_em} 0 0 {inv_units_per_em} 0 0] def -/FontBBox [{bbox}] def -/FontType 3 def -/Encoding [{encoding}] def -/CharStrings {num_glyphs} dict dup begin -/.notdef 0 def -""".format(font_name=font.postscript_name, - inv_units_per_em=1 / font.units_per_EM, - bbox=" ".join(map(str, font.bbox)), - encoding=" ".join("/{}".format(font.get_glyph_name(glyph_id)) - for glyph_id in glyph_ids), - num_glyphs=len(glyph_ids) + 1) - postamble = """ -end readonly def - -/BuildGlyph { - exch begin - CharStrings exch - 2 copy known not {pop /.notdef} if - true 3 1 roll get exec - end -} _d - -/BuildChar { - 1 index /Encoding get exch get - 1 index /BuildGlyph get exec -} _d - -FontName currentdict end definefont pop -""" - - entries = [] - for glyph_id in glyph_ids: - g = font.load_glyph(glyph_id, LOAD_NO_SCALE) - v, c = font.get_path() - entries.append( - "/%(name)s{%(bbox)s sc\n" % { - "name": font.get_glyph_name(glyph_id), - "bbox": " ".join(map(str, [g.horiAdvance, 0, *g.bbox])), - } - + _path.convert_to_string( - # Convert back to TrueType's internal units (1/64's). - # (Other dimensions are already in these units.) - Path(v * 64, c), None, None, False, None, 0, - # No code for quad Beziers triggers auto-conversion to cubics. - # Drop intermediate closepolys (relying on the outline - # decomposer always explicitly moving to the closing point - # first). - [b"m", b"l", b"", b"c", b""], True).decode("ascii") - + "ce} _d" - ) - - return preamble + "\n".join(entries) + postamble - - -def _font_to_ps_type42(font_path, chars, fh): - """ - Subset *chars* from the font at *font_path* into a Type 42 font at *fh*. - - Parameters - ---------- - font_path : path-like - Path to the font to be subsetted. - chars : str - The characters to include in the subsetted font. - fh : file-like - Where to write the font. - """ - subset_str = ''.join(chr(c) for c in chars) - _log.debug("SUBSET %s characters: %s", font_path, subset_str) - try: - fontdata = _backend_pdf_ps.get_glyphs_subset(font_path, subset_str) - _log.debug("SUBSET %s %d -> %d", font_path, os.stat(font_path).st_size, - fontdata.getbuffer().nbytes) - - # Give ttconv a subsetted font along with updated glyph_ids. - font = FT2Font(fontdata) - glyph_ids = [font.get_char_index(c) for c in chars] - with TemporaryDirectory() as tmpdir: - tmpfile = os.path.join(tmpdir, "tmp.ttf") - - with open(tmpfile, 'wb') as tmp: - tmp.write(fontdata.getvalue()) - - # TODO: allow convert_ttf_to_ps to input file objects (BytesIO) - convert_ttf_to_ps(os.fsencode(tmpfile), fh, 42, glyph_ids) - except RuntimeError: - _log.warning( - "The PostScript backend does not currently " - "support the selected font.") - raise - - -def _log_if_debug_on(meth): - """ - Wrap `RendererPS` method *meth* to emit a PS comment with the method name, - if the global flag `debugPS` is set. - """ - @functools.wraps(meth) - def wrapper(self, *args, **kwargs): - if debugPS: - self._pswriter.write(f"% {meth.__name__}\n") - return meth(self, *args, **kwargs) - - return wrapper - - -class RendererPS(_backend_pdf_ps.RendererPDFPSBase): - """ - The renderer handles all the drawing primitives using a graphics - context instance that controls the colors/styles. 
- """ - - _afm_font_dir = cbook._get_data_path("fonts/afm") - _use_afm_rc_name = "ps.useafm" - - def __init__(self, width, height, pswriter, imagedpi=72): - # Although postscript itself is dpi independent, we need to inform the - # image code about a requested dpi to generate high resolution images - # and them scale them before embedding them. - super().__init__(width, height) - self._pswriter = pswriter - if mpl.rcParams['text.usetex']: - self.textcnt = 0 - self.psfrag = [] - self.imagedpi = imagedpi - - # current renderer state (None=uninitialised) - self.color = None - self.linewidth = None - self.linejoin = None - self.linecap = None - self.linedash = None - self.fontname = None - self.fontsize = None - self._hatches = {} - self.image_magnification = imagedpi / 72 - self._clip_paths = {} - self._path_collection_id = 0 - - self._character_tracker = _backend_pdf_ps.CharacterTracker() - self._logwarn_once = functools.lru_cache(None)(_log.warning) - - def _is_transparent(self, rgb_or_rgba): - if rgb_or_rgba is None: - return True # Consistent with rgbFace semantics. - elif len(rgb_or_rgba) == 4: - if rgb_or_rgba[3] == 0: - return True - if rgb_or_rgba[3] != 1: - self._logwarn_once( - "The PostScript backend does not support transparency; " - "partially transparent artists will be rendered opaque.") - return False - else: # len() == 3. - return False - - def set_color(self, r, g, b, store=True): - if (r, g, b) != self.color: - self._pswriter.write(f"{r:1.3f} setgray\n" - if r == g == b else - f"{r:1.3f} {g:1.3f} {b:1.3f} setrgbcolor\n") - if store: - self.color = (r, g, b) - - def set_linewidth(self, linewidth, store=True): - linewidth = float(linewidth) - if linewidth != self.linewidth: - self._pswriter.write("%1.3f setlinewidth\n" % linewidth) - if store: - self.linewidth = linewidth - - @staticmethod - def _linejoin_cmd(linejoin): - # Support for directly passing integer values is for backcompat. - linejoin = {'miter': 0, 'round': 1, 'bevel': 2, 0: 0, 1: 1, 2: 2}[ - linejoin] - return f"{linejoin:d} setlinejoin\n" - - def set_linejoin(self, linejoin, store=True): - if linejoin != self.linejoin: - self._pswriter.write(self._linejoin_cmd(linejoin)) - if store: - self.linejoin = linejoin - - @staticmethod - def _linecap_cmd(linecap): - # Support for directly passing integer values is for backcompat. 
- linecap = {'butt': 0, 'round': 1, 'projecting': 2, 0: 0, 1: 1, 2: 2}[ - linecap] - return f"{linecap:d} setlinecap\n" - - def set_linecap(self, linecap, store=True): - if linecap != self.linecap: - self._pswriter.write(self._linecap_cmd(linecap)) - if store: - self.linecap = linecap - - def set_linedash(self, offset, seq, store=True): - if self.linedash is not None: - oldo, oldseq = self.linedash - if np.array_equal(seq, oldseq) and oldo == offset: - return - - self._pswriter.write(f"[{_nums_to_str(*seq)}]" - f" {_nums_to_str(offset)} setdash\n" - if seq is not None and len(seq) else - "[] 0 setdash\n") - if store: - self.linedash = (offset, seq) - - def set_font(self, fontname, fontsize, store=True): - if (fontname, fontsize) != (self.fontname, self.fontsize): - self._pswriter.write(f"/{fontname} {fontsize:1.3f} selectfont\n") - if store: - self.fontname = fontname - self.fontsize = fontsize - - def create_hatch(self, hatch): - sidelen = 72 - if hatch in self._hatches: - return self._hatches[hatch] - name = 'H%d' % len(self._hatches) - linewidth = mpl.rcParams['hatch.linewidth'] - pageheight = self.height * 72 - self._pswriter.write(f"""\ - << /PatternType 1 - /PaintType 2 - /TilingType 2 - /BBox[0 0 {sidelen:d} {sidelen:d}] - /XStep {sidelen:d} - /YStep {sidelen:d} - - /PaintProc {{ - pop - {linewidth:g} setlinewidth -{self._convert_path( - Path.hatch(hatch), Affine2D().scale(sidelen), simplify=False)} - gsave - fill - grestore - stroke - }} bind - >> - matrix - 0 {pageheight:g} translate - makepattern - /{name} exch def -""") - self._hatches[hatch] = name - return name - - def get_image_magnification(self): - """ - Get the factor by which to magnify images passed to draw_image. - Allows a backend to have images at a different resolution to other - artists. - """ - return self.image_magnification - - def _convert_path(self, path, transform, clip=False, simplify=None): - if clip: - clip = (0.0, 0.0, self.width * 72.0, self.height * 72.0) - else: - clip = None - return _path.convert_to_string( - path, transform, clip, simplify, None, - 6, [b"m", b"l", b"", b"c", b"cl"], True).decode("ascii") - - def _get_clip_cmd(self, gc): - clip = [] - rect = gc.get_clip_rectangle() - if rect is not None: - clip.append("%s clipbox\n" % _nums_to_str(*rect.size, *rect.p0)) - path, trf = gc.get_clip_path() - if path is not None: - key = (path, id(trf)) - custom_clip_cmd = self._clip_paths.get(key) - if custom_clip_cmd is None: - custom_clip_cmd = "c%d" % len(self._clip_paths) - self._pswriter.write(f"""\ -/{custom_clip_cmd} {{ -{self._convert_path(path, trf, simplify=False)} -clip -newpath -}} bind def -""") - self._clip_paths[key] = custom_clip_cmd - clip.append(f"{custom_clip_cmd}\n") - return "".join(clip) - - @_log_if_debug_on - def draw_image(self, gc, x, y, im, transform=None): - # docstring inherited - - h, w = im.shape[:2] - imagecmd = "false 3 colorimage" - data = im[::-1, :, :3] # Vertically flipped rgb values. - hexdata = data.tobytes().hex("\n", -64) # Linewrap to 128 chars. 
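- # Choose the image matrix: with no explicit transform, scale the pixel grid down by the image magnification; otherwise embed the caller-supplied affine values and leave the scale factors at 1.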
- - if transform is None: - matrix = "1 0 0 1 0 0" - xscale = w / self.image_magnification - yscale = h / self.image_magnification - else: - matrix = " ".join(map(str, transform.frozen().to_values())) - xscale = 1.0 - yscale = 1.0 - - self._pswriter.write(f"""\ -gsave -{self._get_clip_cmd(gc)} -{x:g} {y:g} translate -[{matrix}] concat -{xscale:g} {yscale:g} scale -/DataString {w:d} string def -{w:d} {h:d} 8 [ {w:d} 0 0 -{h:d} 0 {h:d} ] -{{ -currentfile DataString readhexstring pop -}} bind {imagecmd} -{hexdata} -grestore -""") - - @_log_if_debug_on - def draw_path(self, gc, path, transform, rgbFace=None): - # docstring inherited - clip = rgbFace is None and gc.get_hatch_path() is None - simplify = path.should_simplify and clip - ps = self._convert_path(path, transform, clip=clip, simplify=simplify) - self._draw_ps(ps, gc, rgbFace) - - @_log_if_debug_on - def draw_markers( - self, gc, marker_path, marker_trans, path, trans, rgbFace=None): - # docstring inherited - - ps_color = ( - None - if self._is_transparent(rgbFace) - else '%1.3f setgray' % rgbFace[0] - if rgbFace[0] == rgbFace[1] == rgbFace[2] - else '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace[:3]) - - # construct the generic marker command: - - # don't want the translate to be global - ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] - - lw = gc.get_linewidth() - alpha = (gc.get_alpha() - if gc.get_forced_alpha() or len(gc.get_rgb()) == 3 - else gc.get_rgb()[3]) - stroke = lw > 0 and alpha > 0 - if stroke: - ps_cmd.append('%.1f setlinewidth' % lw) - ps_cmd.append(self._linejoin_cmd(gc.get_joinstyle())) - ps_cmd.append(self._linecap_cmd(gc.get_capstyle())) - - ps_cmd.append(self._convert_path(marker_path, marker_trans, - simplify=False)) - - if rgbFace: - if stroke: - ps_cmd.append('gsave') - if ps_color: - ps_cmd.extend([ps_color, 'fill']) - if stroke: - ps_cmd.append('grestore') - - if stroke: - ps_cmd.append('stroke') - ps_cmd.extend(['grestore', '} bind def']) - - for vertices, code in path.iter_segments( - trans, - clip=(0, 0, self.width*72, self.height*72), - simplify=False): - if len(vertices): - x, y = vertices[-2:] - ps_cmd.append("%g %g o" % (x, y)) - - ps = '\n'.join(ps_cmd) - self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False) - - @_log_if_debug_on - def draw_path_collection(self, gc, master_transform, paths, all_transforms, - offsets, offset_trans, facecolors, edgecolors, - linewidths, linestyles, antialiaseds, urls, - offset_position): - # Is the optimization worth it? 
Rough calculation: - # cost of emitting a path in-line is - # (len_path + 2) * uses_per_path - # cost of definition+use is - # (len_path + 3) + 3 * uses_per_path - len_path = len(paths[0].vertices) if len(paths) > 0 else 0 - uses_per_path = self._iter_collection_uses_per_path( - paths, all_transforms, offsets, facecolors, edgecolors) - should_do_optimization = \ - len_path + 3 * uses_per_path + 3 < (len_path + 2) * uses_per_path - if not should_do_optimization: - return RendererBase.draw_path_collection( - self, gc, master_transform, paths, all_transforms, - offsets, offset_trans, facecolors, edgecolors, - linewidths, linestyles, antialiaseds, urls, - offset_position) - - path_codes = [] - for i, (path, transform) in enumerate(self._iter_collection_raw_paths( - master_transform, paths, all_transforms)): - name = 'p%d_%d' % (self._path_collection_id, i) - path_bytes = self._convert_path(path, transform, simplify=False) - self._pswriter.write(f"""\ -/{name} {{ -newpath -translate -{path_bytes} -}} bind def -""") - path_codes.append(name) - - for xo, yo, path_id, gc0, rgbFace in self._iter_collection( - gc, path_codes, offsets, offset_trans, - facecolors, edgecolors, linewidths, linestyles, - antialiaseds, urls, offset_position): - ps = "%g %g %s" % (xo, yo, path_id) - self._draw_ps(ps, gc0, rgbFace) - - self._path_collection_id += 1 - - @_log_if_debug_on - def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None): - # docstring inherited - if self._is_transparent(gc.get_rgb()): - return # Special handling for fully transparent. - - if not hasattr(self, "psfrag"): - self._logwarn_once( - "The PS backend determines usetex status solely based on " - "rcParams['text.usetex'] and does not support having " - "usetex=True only for some elements; this element will thus " - "be rendered as if usetex=False.") - self.draw_text(gc, x, y, s, prop, angle, False, mtext) - return - - w, h, bl = self.get_text_width_height_descent(s, prop, ismath="TeX") - fontsize = prop.get_size_in_points() - thetext = 'psmarker%d' % self.textcnt - color = '%1.3f,%1.3f,%1.3f' % gc.get_rgb()[:3] - fontcmd = {'sans-serif': r'{\sffamily %s}', - 'monospace': r'{\ttfamily %s}'}.get( - mpl.rcParams['font.family'][0], r'{\rmfamily %s}') - s = fontcmd % s - tex = r'\color[rgb]{%s} %s' % (color, s) - - # Stick to the bottom alignment. - pos = _nums_to_str(x, y-bl) - self.psfrag.append( - r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}' % ( - thetext, angle, fontsize, fontsize*1.25, tex)) - - self._pswriter.write(f"""\ -gsave -{pos} moveto -({thetext}) -show -grestore -""") - self.textcnt += 1 - - @_log_if_debug_on - def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None): - # docstring inherited - - if self._is_transparent(gc.get_rgb()): - return # Special handling for fully transparent. - - if ismath == 'TeX': - return self.draw_tex(gc, x, y, s, prop, angle) - - if ismath: - return self.draw_mathtext(gc, x, y, s, prop, angle) - - stream = [] # list of (ps_name, x, char_name) - - if mpl.rcParams['ps.useafm']: - font = self._get_font_afm(prop) - ps_name = (font.postscript_name.encode("ascii", "replace") - .decode("ascii")) - scale = 0.001 * prop.get_size_in_points() - thisx = 0 - last_name = None # kerns returns 0 for None. 
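- # Walk the string: map each character to its Type 1 glyph name, apply AFM kerning against the previous glyph, record (font, x, name), then advance by the glyph width scaled to points; unknown characters fall back to 'question'.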
- for c in s: - name = uni2type1.get(ord(c), f"uni{ord(c):04X}") - try: - width = font.get_width_from_char_name(name) - except KeyError: - name = 'question' - width = font.get_width_char('?') - kern = font.get_kern_dist_from_name(last_name, name) - last_name = name - thisx += kern * scale - stream.append((ps_name, thisx, name)) - thisx += width * scale - - else: - font = self._get_font_ttf(prop) - self._character_tracker.track(font, s) - for item in _text_helpers.layout(s, font): - ps_name = (item.ft_object.postscript_name - .encode("ascii", "replace").decode("ascii")) - glyph_name = item.ft_object.get_glyph_name(item.glyph_idx) - stream.append((ps_name, item.x, glyph_name)) - self.set_color(*gc.get_rgb()) - - for ps_name, group in itertools. \ - groupby(stream, lambda entry: entry[0]): - self.set_font(ps_name, prop.get_size_in_points(), False) - thetext = "\n".join(f"{x:g} 0 m /{name:s} glyphshow" - for _, x, name in group) - self._pswriter.write(f"""\ -gsave -{self._get_clip_cmd(gc)} -{x:g} {y:g} translate -{angle:g} rotate -{thetext} -grestore -""") - - @_log_if_debug_on - def draw_mathtext(self, gc, x, y, s, prop, angle): - """Draw the math text using matplotlib.mathtext.""" - width, height, descent, glyphs, rects = \ - self._text2path.mathtext_parser.parse(s, 72, prop) - self.set_color(*gc.get_rgb()) - self._pswriter.write( - f"gsave\n" - f"{x:g} {y:g} translate\n" - f"{angle:g} rotate\n") - lastfont = None - for font, fontsize, num, ox, oy in glyphs: - self._character_tracker.track_glyph(font, num) - if (font.postscript_name, fontsize) != lastfont: - lastfont = font.postscript_name, fontsize - self._pswriter.write( - f"/{font.postscript_name} {fontsize} selectfont\n") - glyph_name = ( - font.get_name_char(chr(num)) if isinstance(font, AFM) else - font.get_glyph_name(font.get_char_index(num))) - self._pswriter.write( - f"{ox:g} {oy:g} moveto\n" - f"/{glyph_name} glyphshow\n") - for ox, oy, w, h in rects: - self._pswriter.write(f"{ox} {oy} {w} {h} rectfill\n") - self._pswriter.write("grestore\n") - - @_log_if_debug_on - def draw_gouraud_triangle(self, gc, points, colors, trans): - self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)), - colors.reshape((1, 3, 4)), trans) - - @_log_if_debug_on - def draw_gouraud_triangles(self, gc, points, colors, trans): - assert len(points) == len(colors) - assert points.ndim == 3 - assert points.shape[1] == 3 - assert points.shape[2] == 2 - assert colors.ndim == 3 - assert colors.shape[1] == 3 - assert colors.shape[2] == 4 - - shape = points.shape - flat_points = points.reshape((shape[0] * shape[1], 2)) - flat_points = trans.transform(flat_points) - flat_colors = colors.reshape((shape[0] * shape[1], 4)) - points_min = np.min(flat_points, axis=0) - (1 << 12) - points_max = np.max(flat_points, axis=0) + (1 << 12) - factor = np.ceil((2 ** 32 - 1) / (points_max - points_min)) - - xmin, ymin = points_min - xmax, ymax = points_max - - data = np.empty( - shape[0] * shape[1], - dtype=[('flags', 'u1'), ('points', '2>u4'), ('colors', '3u1')]) - data['flags'] = 0 - data['points'] = (flat_points - points_min) * factor - data['colors'] = flat_colors[:, :3] * 255.0 - hexdata = data.tobytes().hex("\n", -64) # Linewrap to 128 chars. 
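- # Emit a ShadingType 4 (free-form Gouraud-shaded triangle mesh) dictionary; the vertex coordinates were quantized to 32-bit unsigned integers and the colors to 8-bit components above.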
- - self._pswriter.write(f"""\ -gsave -<< /ShadingType 4 - /ColorSpace [/DeviceRGB] - /BitsPerCoordinate 32 - /BitsPerComponent 8 - /BitsPerFlag 8 - /AntiAlias true - /Decode [ {xmin:g} {xmax:g} {ymin:g} {ymax:g} 0 1 0 1 0 1 ] - /DataSource < -{hexdata} -> ->> -shfill -grestore -""") - - def _draw_ps(self, ps, gc, rgbFace, *, fill=True, stroke=True): - """ - Emit the PostScript snippet *ps* with all the attributes from *gc* - applied. *ps* must consist of PostScript commands to construct a path. - - The *fill* and/or *stroke* kwargs can be set to False if the *ps* - string already includes filling and/or stroking, in which case - `_draw_ps` is just supplying properties and clipping. - """ - write = self._pswriter.write - mightstroke = (gc.get_linewidth() > 0 - and not self._is_transparent(gc.get_rgb())) - if not mightstroke: - stroke = False - if self._is_transparent(rgbFace): - fill = False - hatch = gc.get_hatch() - - if mightstroke: - self.set_linewidth(gc.get_linewidth()) - self.set_linejoin(gc.get_joinstyle()) - self.set_linecap(gc.get_capstyle()) - self.set_linedash(*gc.get_dashes()) - if mightstroke or hatch: - self.set_color(*gc.get_rgb()[:3]) - write('gsave\n') - - write(self._get_clip_cmd(gc)) - - write(ps.strip()) - write("\n") - - if fill: - if stroke or hatch: - write("gsave\n") - self.set_color(*rgbFace[:3], store=False) - write("fill\n") - if stroke or hatch: - write("grestore\n") - - if hatch: - hatch_name = self.create_hatch(hatch) - write("gsave\n") - write("%f %f %f " % gc.get_hatch_color()[:3]) - write("%s setpattern fill grestore\n" % hatch_name) - - if stroke: - write("stroke\n") - - write("grestore\n") - - -class _Orientation(Enum): - portrait, landscape = range(2) - - def swap_if_landscape(self, shape): - return shape[::-1] if self.name == "landscape" else shape - - -class FigureCanvasPS(FigureCanvasBase): - fixed_dpi = 72 - filetypes = {'ps': 'Postscript', - 'eps': 'Encapsulated Postscript'} - - def get_default_filetype(self): - return 'ps' - - def _print_ps( - self, fmt, outfile, *, - metadata=None, papertype=None, orientation='portrait', - bbox_inches_restore=None, **kwargs): - - dpi = self.figure.dpi - self.figure.dpi = 72 # Override the dpi kwarg - - dsc_comments = {} - if isinstance(outfile, (str, os.PathLike)): - filename = pathlib.Path(outfile).name - dsc_comments["Title"] = \ - filename.encode("ascii", "replace").decode("ascii") - dsc_comments["Creator"] = (metadata or {}).get( - "Creator", - f"Matplotlib v{mpl.__version__}, https://matplotlib.org/") - # See https://reproducible-builds.org/specs/source-date-epoch/ - source_date_epoch = os.getenv("SOURCE_DATE_EPOCH") - dsc_comments["CreationDate"] = ( - datetime.datetime.fromtimestamp( - int(source_date_epoch), - datetime.timezone.utc).strftime("%a %b %d %H:%M:%S %Y") - if source_date_epoch - else time.ctime()) - dsc_comments = "\n".join( - f"%%{k}: {v}" for k, v in dsc_comments.items()) - - if papertype is None: - papertype = mpl.rcParams['ps.papersize'] - papertype = papertype.lower() - _api.check_in_list(['auto', *papersize], papertype=papertype) - - orientation = _api.check_getitem( - _Orientation, orientation=orientation.lower()) - - printer = (self._print_figure_tex - if mpl.rcParams['text.usetex'] else - self._print_figure) - printer(fmt, outfile, dpi=dpi, dsc_comments=dsc_comments, - orientation=orientation, papertype=papertype, - bbox_inches_restore=bbox_inches_restore, **kwargs) - - def _print_figure( - self, fmt, outfile, *, - dpi, dsc_comments, orientation, papertype, - bbox_inches_restore=None): 
- """ - Render the figure to a filesystem path or a file-like object. - - Parameters are as for `.print_figure`, except that *dsc_comments* is a - string containing Document Structuring Convention comments, - generated from the *metadata* parameter to `.print_figure`. - """ - is_eps = fmt == 'eps' - if not (isinstance(outfile, (str, os.PathLike)) - or is_writable_file_like(outfile)): - raise ValueError("outfile must be a path or a file-like object") - - # find the appropriate papertype - width, height = self.figure.get_size_inches() - if papertype == 'auto': - papertype = _get_papertype( - *orientation.swap_if_landscape((width, height))) - paper_width, paper_height = orientation.swap_if_landscape( - papersize[papertype]) - - if mpl.rcParams['ps.usedistiller']: - # distillers improperly clip eps files if pagesize is too small - if width > paper_width or height > paper_height: - papertype = _get_papertype( - *orientation.swap_if_landscape((width, height))) - paper_width, paper_height = orientation.swap_if_landscape( - papersize[papertype]) - - # center the figure on the paper - xo = 72 * 0.5 * (paper_width - width) - yo = 72 * 0.5 * (paper_height - height) - - llx = xo - lly = yo - urx = llx + self.figure.bbox.width - ury = lly + self.figure.bbox.height - rotation = 0 - if orientation is _Orientation.landscape: - llx, lly, urx, ury = lly, llx, ury, urx - xo, yo = 72 * paper_height - yo, xo - rotation = 90 - bbox = (llx, lly, urx, ury) - - self._pswriter = StringIO() - - # mixed mode rendering - ps_renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi) - renderer = MixedModeRenderer( - self.figure, width, height, dpi, ps_renderer, - bbox_inches_restore=bbox_inches_restore) - - self.figure.draw(renderer) - - def print_figure_impl(fh): - # write the PostScript headers - if is_eps: - print("%!PS-Adobe-3.0 EPSF-3.0", file=fh) - else: - print(f"%!PS-Adobe-3.0\n" - f"%%DocumentPaperSizes: {papertype}\n" - f"%%Pages: 1\n", - end="", file=fh) - print(f"{dsc_comments}\n" - f"%%Orientation: {orientation.name}\n" - f"{get_bbox_header(bbox)[0]}\n" - f"%%EndComments\n", - end="", file=fh) - - Ndict = len(psDefs) - print("%%BeginProlog", file=fh) - if not mpl.rcParams['ps.useafm']: - Ndict += len(ps_renderer._character_tracker.used) - print("/mpldict %d dict def" % Ndict, file=fh) - print("mpldict begin", file=fh) - print("\n".join(psDefs), file=fh) - if not mpl.rcParams['ps.useafm']: - for font_path, chars \ - in ps_renderer._character_tracker.used.items(): - if not chars: - continue - fonttype = mpl.rcParams['ps.fonttype'] - # Can't use more than 255 chars from a single Type 3 font. - if len(chars) > 255: - fonttype = 42 - fh.flush() - if fonttype == 3: - fh.write(_font_to_ps_type3(font_path, chars)) - else: # Type 42 only. - _font_to_ps_type42(font_path, chars, fh) - print("end", file=fh) - print("%%EndProlog", file=fh) - - if not is_eps: - print("%%Page: 1 1", file=fh) - print("mpldict begin", file=fh) - - print("%s translate" % _nums_to_str(xo, yo), file=fh) - if rotation: - print("%d rotate" % rotation, file=fh) - print("%s clipbox" % _nums_to_str(width*72, height*72, 0, 0), - file=fh) - - # write the figure - print(self._pswriter.getvalue(), file=fh) - - # write the trailer - print("end", file=fh) - print("showpage", file=fh) - if not is_eps: - print("%%EOF", file=fh) - fh.flush() - - if mpl.rcParams['ps.usedistiller']: - # We are going to use an external program to process the output. - # Write to a temporary file. 
- with TemporaryDirectory() as tmpdir: - tmpfile = os.path.join(tmpdir, "tmp.ps") - with open(tmpfile, 'w', encoding='latin-1') as fh: - print_figure_impl(fh) - if mpl.rcParams['ps.usedistiller'] == 'ghostscript': - _try_distill(gs_distill, - tmpfile, is_eps, ptype=papertype, bbox=bbox) - elif mpl.rcParams['ps.usedistiller'] == 'xpdf': - _try_distill(xpdf_distill, - tmpfile, is_eps, ptype=papertype, bbox=bbox) - _move_path_to_path_or_stream(tmpfile, outfile) - - else: # Write directly to outfile. - with cbook.open_file_cm(outfile, "w", encoding="latin-1") as file: - if not file_requires_unicode(file): - file = codecs.getwriter("latin-1")(file) - print_figure_impl(file) - - def _print_figure_tex( - self, fmt, outfile, *, - dpi, dsc_comments, orientation, papertype, - bbox_inches_restore=None): - """ - If :rc:`text.usetex` is True, a temporary pair of tex/eps files - are created to allow tex to manage the text layout via the PSFrags - package. These files are processed to yield the final ps or eps file. - - The rest of the behavior is as for `._print_figure`. - """ - is_eps = fmt == 'eps' - - width, height = self.figure.get_size_inches() - xo = 0 - yo = 0 - - llx = xo - lly = yo - urx = llx + self.figure.bbox.width - ury = lly + self.figure.bbox.height - bbox = (llx, lly, urx, ury) - - self._pswriter = StringIO() - - # mixed mode rendering - ps_renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi) - renderer = MixedModeRenderer(self.figure, - width, height, dpi, ps_renderer, - bbox_inches_restore=bbox_inches_restore) - - self.figure.draw(renderer) - - # write to a temp file, we'll move it to outfile when done - with TemporaryDirectory() as tmpdir: - tmppath = pathlib.Path(tmpdir, "tmp.ps") - tmppath.write_text( - f"""\ -%!PS-Adobe-3.0 EPSF-3.0 -{dsc_comments} -{get_bbox_header(bbox)[0]} -%%EndComments -%%BeginProlog -/mpldict {len(psDefs)} dict def -mpldict begin -{"".join(psDefs)} -end -%%EndProlog -mpldict begin -{_nums_to_str(xo, yo)} translate -{_nums_to_str(width*72, height*72)} 0 0 clipbox -{self._pswriter.getvalue()} -end -showpage -""", - encoding="latin-1") - - if orientation is _Orientation.landscape: # now, ready to rotate - width, height = height, width - bbox = (lly, llx, ury, urx) - - # set the paper size to the figure size if is_eps. 
The - # resulting ps file has the given size with correct bounding - # box so that there is no need to call 'pstoeps' - if is_eps: - paper_width, paper_height = orientation.swap_if_landscape( - self.figure.get_size_inches()) - else: - if papertype == 'auto': - papertype = _get_papertype(width, height) - paper_width, paper_height = papersize[papertype] - - psfrag_rotated = _convert_psfrags( - tmppath, ps_renderer.psfrag, paper_width, paper_height, - orientation.name) - - if (mpl.rcParams['ps.usedistiller'] == 'ghostscript' - or mpl.rcParams['text.usetex']): - _try_distill(gs_distill, - tmppath, is_eps, ptype=papertype, bbox=bbox, - rotated=psfrag_rotated) - elif mpl.rcParams['ps.usedistiller'] == 'xpdf': - _try_distill(xpdf_distill, - tmppath, is_eps, ptype=papertype, bbox=bbox, - rotated=psfrag_rotated) - - _move_path_to_path_or_stream(tmppath, outfile) - - print_ps = functools.partialmethod(_print_ps, "ps") - print_eps = functools.partialmethod(_print_ps, "eps") - - def draw(self): - self.figure.draw_without_rendering() - return super().draw() - - -@_api.deprecated("3.6") -def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble, - paper_width, paper_height, orientation): - return _convert_psfrags( - pathlib.Path(tmpfile), psfrags, paper_width, paper_height, orientation) - - -def _convert_psfrags(tmppath, psfrags, paper_width, paper_height, orientation): - """ - When we want to use the LaTeX backend with postscript, we write PSFrag tags - to a temporary postscript file, each one marking a position for LaTeX to - render some text. convert_psfrags generates a LaTeX document containing the - commands to convert those tags to text. LaTeX/dvips produces the postscript - file that includes the actual text. - """ - with mpl.rc_context({ - "text.latex.preamble": - mpl.rcParams["text.latex.preamble"] + - mpl.texmanager._usepackage_if_not_loaded("color") + - mpl.texmanager._usepackage_if_not_loaded("graphicx") + - mpl.texmanager._usepackage_if_not_loaded("psfrag") + - r"\geometry{papersize={%(width)sin,%(height)sin},margin=0in}" - % {"width": paper_width, "height": paper_height} - }): - dvifile = TexManager().make_dvi( - "\n" - r"\begin{figure}""\n" - r" \centering\leavevmode""\n" - r" %(psfrags)s""\n" - r" \includegraphics*[angle=%(angle)s]{%(epsfile)s}""\n" - r"\end{figure}" - % { - "psfrags": "\n".join(psfrags), - "angle": 90 if orientation == 'landscape' else 0, - "epsfile": tmppath.resolve().as_posix(), - }, - fontsize=10) # tex's default fontsize. - - with TemporaryDirectory() as tmpdir: - psfile = os.path.join(tmpdir, "tmp.ps") - cbook._check_and_log_subprocess( - ['dvips', '-q', '-R0', '-o', psfile, dvifile], _log) - shutil.move(psfile, tmppath) - - # check if the dvips created a ps in landscape paper. Somehow, - # above latex+dvips results in a ps file in a landscape mode for a - # certain figure sizes (e.g., 8.3in, 5.8in which is a5). And the - # bounding box of the final output got messed up. We check see if - # the generated ps file is in landscape and return this - # information. The return value is used in pstoeps step to recover - # the correct bounding box. 2010-06-05 JJL - with open(tmppath) as fh: - psfrag_rotated = "Landscape" in fh.read(1000) - return psfrag_rotated - - -def _try_distill(func, tmppath, *args, **kwargs): - try: - func(str(tmppath), *args, **kwargs) - except mpl.ExecutableNotFoundError as exc: - _log.warning("%s. 
Distillation step skipped.", exc) - - -def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False): - """ - Use ghostscript's pswrite or epswrite device to distill a file. - This yields smaller files without illegal encapsulated postscript - operators. The output is low-level, converting text to outlines. - """ - - if eps: - paper_option = "-dEPSCrop" - else: - paper_option = "-sPAPERSIZE=%s" % ptype - - psfile = tmpfile + '.ps' - dpi = mpl.rcParams['ps.distiller.res'] - - cbook._check_and_log_subprocess( - [mpl._get_executable_info("gs").executable, - "-dBATCH", "-dNOPAUSE", "-r%d" % dpi, "-sDEVICE=ps2write", - paper_option, "-sOutputFile=%s" % psfile, tmpfile], - _log) - - os.remove(tmpfile) - shutil.move(psfile, tmpfile) - - # While it is best if above steps preserve the original bounding - # box, there seem to be cases when it is not. For those cases, - # the original bbox can be restored during the pstoeps step. - - if eps: - # For some versions of gs, above steps result in a ps file where the - # original bbox is no more correct. Do not adjust bbox for now. - pstoeps(tmpfile, bbox, rotated=rotated) - - -def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False): - """ - Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file. - This yields smaller files without illegal encapsulated postscript - operators. This distiller is preferred, generating high-level postscript - output that treats text as text. - """ - mpl._get_executable_info("gs") # Effectively checks for ps2pdf. - mpl._get_executable_info("pdftops") - - with TemporaryDirectory() as tmpdir: - tmppdf = pathlib.Path(tmpdir, "tmp.pdf") - tmpps = pathlib.Path(tmpdir, "tmp.ps") - # Pass options as `-foo#bar` instead of `-foo=bar` to keep Windows - # happy (https://ghostscript.com/doc/9.56.1/Use.htm#MS_Windows). - cbook._check_and_log_subprocess( - ["ps2pdf", - "-dAutoFilterColorImages#false", - "-dAutoFilterGrayImages#false", - "-sAutoRotatePages#None", - "-sGrayImageFilter#FlateEncode", - "-sColorImageFilter#FlateEncode", - "-dEPSCrop" if eps else "-sPAPERSIZE#%s" % ptype, - tmpfile, tmppdf], _log) - cbook._check_and_log_subprocess( - ["pdftops", "-paper", "match", "-level2", tmppdf, tmpps], _log) - shutil.move(tmpps, tmpfile) - if eps: - pstoeps(tmpfile) - - -def get_bbox_header(lbrt, rotated=False): - """ - Return a postscript header string for the given bbox lbrt=(l, b, r, t). - Optionally, return rotate command. - """ - - l, b, r, t = lbrt - if rotated: - rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0) - else: - rotate = "" - bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t)) - hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % ( - l, b, r, t) - - return '\n'.join([bbox_info, hires_bbox_info]), rotate - - -def pstoeps(tmpfile, bbox=None, rotated=False): - """ - Convert the postscript to encapsulated postscript. The bbox of - the eps file will be replaced with the given *bbox* argument. If - None, original bbox will be used. 
- """ - - # if rotated==True, the output eps file need to be rotated - if bbox: - bbox_info, rotate = get_bbox_header(bbox, rotated=rotated) - else: - bbox_info, rotate = None, None - - epsfile = tmpfile + '.eps' - with open(epsfile, 'wb') as epsh, open(tmpfile, 'rb') as tmph: - write = epsh.write - # Modify the header: - for line in tmph: - if line.startswith(b'%!PS'): - write(b"%!PS-Adobe-3.0 EPSF-3.0\n") - if bbox: - write(bbox_info.encode('ascii') + b'\n') - elif line.startswith(b'%%EndComments'): - write(line) - write(b'%%BeginProlog\n' - b'save\n' - b'countdictstack\n' - b'mark\n' - b'newpath\n' - b'/showpage {} def\n' - b'/setpagedevice {pop} def\n' - b'%%EndProlog\n' - b'%%Page 1 1\n') - if rotate: - write(rotate.encode('ascii') + b'\n') - break - elif bbox and line.startswith((b'%%Bound', b'%%HiResBound', - b'%%DocumentMedia', b'%%Pages')): - pass - else: - write(line) - # Now rewrite the rest of the file, and modify the trailer. - # This is done in a second loop such that the header of the embedded - # eps file is not modified. - for line in tmph: - if line.startswith(b'%%EOF'): - write(b'cleartomark\n' - b'countdictstack\n' - b'exch sub { end } repeat\n' - b'restore\n' - b'showpage\n' - b'%%EOF\n') - elif line.startswith(b'%%PageBoundingBox'): - pass - else: - write(line) - - os.remove(tmpfile) - shutil.move(epsfile, tmpfile) - - -FigureManagerPS = FigureManagerBase - - -# The following Python dictionary psDefs contains the entries for the -# PostScript dictionary mpldict. This dictionary implements most of -# the matplotlib primitives and some abbreviations. -# -# References: -# https://www.adobe.com/content/dam/acom/en/devnet/actionscript/articles/PLRM.pdf -# http://preserve.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial -# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/ -# - -# The usage comments use the notation of the operator summary -# in the PostScript Language reference manual. -psDefs = [ - # name proc *_d* - - # Note that this cannot be bound to /d, because when embedding a Type3 font - # we may want to define a "d" glyph using "/d{...} d" which would locally - # overwrite the definition. 
- "/_d { bind def } bind def", - # x y *m* - - "/m { moveto } _d", - # x y *l* - - "/l { lineto } _d", - # x y *r* - - "/r { rlineto } _d", - # x1 y1 x2 y2 x y *c* - - "/c { curveto } _d", - # *cl* - - "/cl { closepath } _d", - # *ce* - - "/ce { closepath eofill } _d", - # w h x y *box* - - """/box { - m - 1 index 0 r - 0 exch r - neg 0 r - cl - } _d""", - # w h x y *clipbox* - - """/clipbox { - box - clip - newpath - } _d""", - # wx wy llx lly urx ury *setcachedevice* - - "/sc { setcachedevice } _d", -] - - -@_Backend.export -class _BackendPS(_Backend): - backend_version = 'Level II' - FigureCanvas = FigureCanvasPS diff --git a/spaces/dcq/freegpt-webui/client/css/checkbox.css b/spaces/dcq/freegpt-webui/client/css/checkbox.css deleted file mode 100644 index 582068c588877acdca097724fb1f90190f555921..0000000000000000000000000000000000000000 --- a/spaces/dcq/freegpt-webui/client/css/checkbox.css +++ /dev/null @@ -1,59 +0,0 @@ -.checkbox input { - height: 0; - width: 0; - display: none; -} - -.checkbox span { - font-size: 0.875rem; - color: var(--colour-3); - margin-left: 4px; -} - -.checkbox label:after { - content: ""; - position: absolute; - top: 50%; - transform: translateY(-50%); - left: 5px; - width: 20px; - height: 20px; - background: var(--blur-border); - border-radius: 90px; - transition: 0.33s; -} - -.checkbox input + label:after, -.checkbox input:checked + label { - background: var(--colour-3); -} - -.checkbox input + label, -.checkbox input:checked + label:after { - background: var(--blur-border); -} - -.checkbox input:checked + label:after { - left: calc(100% - 5px - 20px); -} - -@media screen and (max-width: 990px) { - .checkbox span { - font-size: 0.75rem; - } - - .checkbox label { - width: 25px; - height: 15px; - } - - .checkbox label:after { - left: 2px; - width: 10px; - height: 10px; - } - - .checkbox input:checked + label:after { - left: calc(100% - 2px - 10px); - } -} diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py deleted file mode 100644 index 018e020491ce3711117f9afe13547f12b8ddf48e..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import List, Optional, Tuple, Union - -import torch - -from ...utils import logging, randn_tensor -from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class DanceDiffusionPipeline(DiffusionPipeline): - r""" - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
- - Parameters: - unet ([`UNet1DModel`]): U-Net architecture to denoise the encoded image. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of - [`IPNDMScheduler`]. - """ - - def __init__(self, unet, scheduler): - super().__init__() - self.register_modules(unet=unet, scheduler=scheduler) - - @torch.no_grad() - def __call__( - self, - batch_size: int = 1, - num_inference_steps: int = 100, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - audio_length_in_s: Optional[float] = None, - return_dict: bool = True, - ) -> Union[AudioPipelineOutput, Tuple]: - r""" - Args: - batch_size (`int`, *optional*, defaults to 1): - The number of audio samples to generate. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality audio sample at - the expense of slower inference. - generator (`torch.Generator`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): - The length of the generated audio sample in seconds. Note that the output of the pipeline, *i.e.* - `sample_size`, will be `audio_length_in_s` * `self.unet.sample_rate`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipelines.AudioPipelineOutput`] or `tuple`: [`~pipelines.utils.AudioPipelineOutput`] if `return_dict` is - True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. - """ - - if audio_length_in_s is None: - audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate - - sample_size = audio_length_in_s * self.unet.sample_rate - - down_scale_factor = 2 ** len(self.unet.up_blocks) - if sample_size < 3 * down_scale_factor: - raise ValueError( - f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" - f" {3 * down_scale_factor / self.unet.sample_rate}." - ) - - original_sample_size = int(sample_size) - if sample_size % down_scale_factor != 0: - sample_size = ((audio_length_in_s * self.unet.sample_rate) // down_scale_factor + 1) * down_scale_factor - logger.info( - f"{audio_length_in_s} is increased to {sample_size / self.unet.sample_rate} so that it can be handled" - f" by the model. It will be cut to {original_sample_size / self.unet.sample_rate} after the denoising" - " process." - ) - sample_size = int(sample_size) - - dtype = next(iter(self.unet.parameters())).dtype - shape = (batch_size, self.unet.in_channels, sample_size) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype) - - # set step values - self.scheduler.set_timesteps(num_inference_steps, device=audio.device) - self.scheduler.timesteps = self.scheduler.timesteps.to(dtype) - - for t in self.progress_bar(self.scheduler.timesteps): - # 1. predict noise model_output - model_output = self.unet(audio, t).sample - - # 2. 
compute previous image: x_t -> t_t-1 - audio = self.scheduler.step(model_output, t, audio).prev_sample - - audio = audio.clamp(-1, 1).float().cpu().numpy() - - audio = audio[:, :, :original_sample_size] - - if not return_dict: - return (audio,) - - return AudioPipelineOutput(audios=audio) diff --git a/spaces/deepliteai/yolobench/app.py b/spaces/deepliteai/yolobench/app.py deleted file mode 100644 index ea627fb274d737ed8f5078ba1e815922dfac4936..0000000000000000000000000000000000000000 --- a/spaces/deepliteai/yolobench/app.py +++ /dev/null @@ -1,285 +0,0 @@ -import gradio as gr - -from plotting import create_yolobench_plots, get_pareto_table -from utils import DEEPLITE_DARK_BLUE_GRADIO - -def get_hw_description(hw_name): - HW_URLS = { - 'Jetson Nano (GPU, ONNX Runtime, FP32)': 'https://8074457.fs1.hubspotusercontent-na1.net/hubfs/8074457/YOLOBench%20Hardware%20product%20sheets/JetsonNano_DataSheet_DS09366001v1.1.pdf', - 'Raspberry Pi 4 Model B (CPU, TFLite, FP32)': 'https://8074457.fs1.hubspotusercontent-na1.net/hubfs/8074457/YOLOBench%20Hardware%20product%20sheets/raspberry-pi-4-datasheet.pdf', - 'Intel® Core™i7-10875H (CPU, OpenVINO, FP32)': 'https://8074457.fs1.hubspotusercontent-na1.net/hubfs/8074457/YOLOBench%20Hardware%20product%20sheets/Intel_ARK_SpecificationsChart_2023_10_11.pdf', - 'Khadas VIM3 (NPU, INT16)': 'https://8074457.fs1.hubspotusercontent-na1.net/hubfs/8074457/YOLOBench%20Hardware%20product%20sheets/khadas_vim3_specs.pdf', - 'Orange Pi 5 (NPU, FP16)': 'https://8074457.fs1.hubspotusercontent-na1.net/hubfs/8074457/YOLOBench%20Hardware%20product%20sheets/OrangePi_5_RK3588S_User%20Manual_v1.5.pdf', - } - - hw_url = HW_URLS[hw_name] - DESC = f""" - 🔸 Click [here]({hw_url}) for more information on the selected hardware platform. - 🔸 Refer to the [Deeplite Torch Zoo](https://github.com/Deeplite/deeplite-torch-zoo/tree/develop/results/yolobench) for details about latency measurement experiments. - """ - return DESC - - -with gr.Blocks(theme=gr.themes.Default(secondary_hue=DEEPLITE_DARK_BLUE_GRADIO), - css="table { width: 100%; }", analytics_enabled=True) as demo: - - gr.HTML( - """ -
      - -
      - """ - ) - - # switch to light theme by default - demo.load( - None, - _js=""" - () => { - let mediaQueryObj = window.matchMedia('(prefers-color-scheme: dark)'); - let systemDarkTheme = window.location.href.includes("theme=system") && mediaQueryObj.matches; - if (mediaQueryObj.matches){ - document.body.classList.toggle('dark'); - document.querySelector('gradio-app').style.backgroundColor = 'var(--color-background-primary)' - } - } - """, - ) - - demo.load( - None, - _js=""" - () => { - const script2 = document.createElement("script"); - script2.src = "https://www.googletagmanager.com/gtag/js?id=G-01G83VTHE0"; - script2.async = true; - document.head.appendChild(script2); - window.dataLayer = window.dataLayer || []; - function gtag(){dataLayer.push(arguments);} - gtag('js', new Date()); - gtag('config', 'G-01G83VTHE0', { - 'page_path': "/spaces/deepliteai/yolobench", - 'page_title': 'yolobench', - 'cookie_flags': 'SameSite=None;Secure', - 'debug_mode':true, - }); - } - """, - ) - - with gr.Row(): - gr.Markdown( - """ - - - 🚀 YOLOBench 🚀 is a latency-accuracy benchmark of popular single-stage detectors from the YOLO series. Major highlights of this work are: - - 🔸 includes architectures from YOLOv3 to YOLOv8,
      - 🔸 trained on four popular object detection datasets (COCO, VOC, WIDER FACE, SKU-110k),
      - 🔸 latency measured on five embedded hardware platforms (Jetson Nano GPU, ARM CPU, Intel CPU, Khadas VIM3 NPU, Orange Pi NPU),
      - 🔸 all models are trained with the same training loop and hyperparameters (as implemented in the [Ultralytics YOLOv8 codebase](https://github.com/ultralytics/ultralytics)),
      - 🔸 both the detection head structure and the loss function used are that of YOLOv8, giving a chance to isolate the contribution of the backbone/neck architecture on the latency-accuracy trade-off of YOLO models.
      - In particular, we show that older backbone/neck structures like those of YOLOv3 and YOLOv4 are still competitive compared to more recent architectures in a controlled environment. For more details, please refer to the [arXiv preprint](https://arxiv.org/abs/2307.13901) and the [codebase](https://github.com/Deeplite/deeplite-torch-zoo). - - # - -
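A quick way to see what "Pareto-optimal" means for this latency-accuracy comparison: a model is kept only if no other model is both faster and more accurate. The sketch below is illustrative only; it is not the code behind `get_pareto_table` in `plotting.py`, and the model names and numbers in the example are made up.

```python
# Illustrative Pareto-front filter over (name, latency_ms, accuracy) tuples.
# Not the YOLOBench implementation; this Space builds its tables in plotting.py.
def pareto_front(models):
    front = []
    for name, lat, acc in models:
        # a point is dominated if some other point is at least as fast AND at least as accurate
        dominated = any(
            o_lat <= lat and o_acc >= acc and (o_lat, o_acc) != (lat, acc)
            for _, o_lat, o_acc in models
        )
        if not dominated:
            front.append((name, lat, acc))
    return sorted(front, key=lambda m: m[1])  # fastest first

points = [('yolo8n', 12.0, 0.37), ('yolo5s', 20.0, 0.37), ('yolo8s', 21.0, 0.44)]
print(pareto_front(points))  # [('yolo8n', 12.0, 0.37), ('yolo8s', 21.0, 0.44)]
```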
      - - # - """ - ) - - with gr.Row(equal_height=True): - with gr.Column(): - hardware_name = gr.Dropdown( - choices=[ - 'Jetson Nano (GPU, ONNX Runtime, FP32)', - 'Raspberry Pi 4 Model B (CPU, TFLite, FP32)', - 'Intel® Core™i7-10875H (CPU, OpenVINO, FP32)', - 'Khadas VIM3 (NPU, INT16)', - 'Orange Pi 5 (NPU, FP16)', - ], - value='Jetson Nano (GPU, ONNX Runtime, FP32)', - label='Hardware', - ) - with gr.Column(): - dataset_name = gr.Dropdown( - choices=['COCO', 'PASCAL VOC', 'SKU-110K', 'WIDERFACE'], - value='COCO', - label='Dataset', - ) - - with gr.Row(equal_height=True): - with gr.Column(): - hardware_desc = gr.Markdown(get_hw_description(hardware_name.value)) - - with gr.Column(): - metric_name = gr.Radio( - ['mAP@0.5:0.95', 'mAP@0.5', 'Precision', 'Recall'], - value='mAP@0.5:0.95', - label='Accuracy metric to plot', - ) - - with gr.Row(equal_height=True): - with gr.Column(): - gr.Markdown(""" - - - 🚀 Want to add your own hardware benchmarks to YOLOBench? 🚀 - Contact us [here](https://info.deeplite.ai/add_yolobench_data) for your benchmarking kit and we'll set you up! - - - """) - - with gr.Column(): - vis_options = gr.CheckboxGroup( - [ - 'Model family', - 'Highlight Pareto', - 'Show Pareto only', - 'Log x-axis' - ], - value=['Model family',], - label='Visualization options', - ) - - - with gr.Row(): - upper_panel_fig = gr.Plot(show_label=False) - - gr.Markdown( - """ - ## - - - Models from this benchmark can be loaded using [Deeplite Torch Zoo](https://github.com/Deeplite/deeplite-torch-zoo) as follows: - - - - ## - - ```python - from deeplite_torch_zoo import get_model - model = get_model( - model_name='yolo4n', # create a YOLOv4n model for the COCO dataset - dataset_name='coco', # (`n` corresponds to width factor 0.25, depth factor 0.33) - pretrained=False, # - custom_head='v8' # attach a YOLOv8 detection head to YOLOv4n backbone+neck - ) - ``` - - - - To train a model, run - - - - ```python - from deeplite_torch_zoo.trainer import Detector - model = Detector(torch_model=model) # previously created YOLOv4n model - model.train(data='VOC.yaml', epochs=100, imgsz=480) # same arguments as the Ultralytics trainer object - ``` - - ## - -
      - Model naming conventions - - ## - - The model naming convention is that a model named `yolo8d67w25` is a YOLOv8 model with a depth factor of 0.67 and width factor of 0.25. Conventional depth/width factor value namings (n, s, m, l models) are used where possible. YOLOv6(s, m, l) models are considered to be different architectures due to differences other than the depth/width factor value. For every architecture, there are 3 variations in depth factor (0.33, 0.67, 1.0) and 4 variations in width factor (0.25, 0.5, 0.75, 1.0), except for YOLOv7 models, for which only width factor variations are considered while depth is fixed. -
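To make the naming scheme concrete, the hypothetical helper below (not part of YOLOBench or Deeplite Torch Zoo) splits a name such as `yolo8d67w25` into its family, depth factor and width factor; names that use the conventional n/s/m/l suffixes simply do not match the pattern.

```python
# Hypothetical parser for the explicit depth/width naming scheme described above.
# 'yolo8d67w25' -> ('yolo8', 0.67, 0.25); returns None for names like 'yolo4n'.
import re

def parse_yolobench_name(name):
    match = re.fullmatch(r'(?P<family>[a-z0-9]+?)d(?P<depth>\d+)w(?P<width>\d+)', name)
    if match is None:
        return None
    depth = int(match.group('depth')) / 100   # 'd67' -> 0.67
    width = int(match.group('width')) / 100   # 'w25' -> 0.25
    return match.group('family'), depth, width

print(parse_yolobench_name('yolo8d67w25'))  # ('yolo8', 0.67, 0.25)
print(parse_yolobench_name('yolo4n'))       # None (conventional n/s/m/l naming)
```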
      - - ## - - - Pareto-optimal models - - - ## - - COCO pre-trained models are ready for download. Other models coming soon! - """ - ) - - table_mode = gr.Radio( - ['Show top-10 models', 'Show all'], - value='Show top-10 models', - label='Pareto model table' - ) - - with gr.Row(): - # pareto_table = gr.DataFrame(interactive=False) - pareto_table = gr.HTML() - - gr.Markdown( - """ - ## Citation - ``` - Accepted at ICCV 2023 Workshop on Resource-Efficient Deep Learning for Computer Vision (RCV'23) - @article{lazarevich2023yolobench, - title={YOLOBench: Benchmarking Efficient Object Detectors on Embedded Systems}, - author={Lazarevich, Ivan and Grimaldi, Matteo and Kumar, Ravish and Mitra, Saptarshi and Khan, Shahrukh and Sah, Sudhakar}, - journal={arXiv preprint arXiv:2307.13901}, - year={2023} - } - ``` - """ - ) - - inputs = [dataset_name, hardware_name, metric_name, vis_options, table_mode] - - # plot by default (VOC, Raspi4) - demo.load( - fn=create_yolobench_plots, - inputs=inputs, - outputs=[upper_panel_fig, pareto_table], - ) - - demo.load( - fn=get_pareto_table, - inputs=[dataset_name, hardware_name, metric_name], - outputs=[pareto_table], - ) - - # update in case of dataset selection - dataset_name.change( - fn=create_yolobench_plots, - inputs=inputs, - outputs=[upper_panel_fig, pareto_table], - ) - # update in case of metric selection - metric_name.change( - fn=create_yolobench_plots, - inputs=inputs, - outputs=[upper_panel_fig, pareto_table], - ) - - vis_options.change( - fn=create_yolobench_plots, - inputs=inputs, - outputs=[upper_panel_fig, pareto_table], - ) - - table_mode.change( - fn=create_yolobench_plots, - inputs=inputs, - outputs=[upper_panel_fig, pareto_table], - ) - - # update in case of device selection - hardware_name.change( - fn=create_yolobench_plots, - inputs=inputs, - outputs=[upper_panel_fig, pareto_table], - ) - - hardware_name.change( - fn=get_hw_description, - inputs=[hardware_name], - outputs=[hardware_desc], - ) - - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/dfurman/chat-all-in/src/inf_server.py b/spaces/dfurman/chat-all-in/src/inf_server.py deleted file mode 100644 index 87b2788f7481f93b235988fa57ed380798adebd4..0000000000000000000000000000000000000000 --- a/spaces/dfurman/chat-all-in/src/inf_server.py +++ /dev/null @@ -1,36 +0,0 @@ -import logging -import time -from src.llm_boilers import llm_boiler - - -def call_inf_server(prompt, openai_key, episode_number): - model_id = "gpt-3.5-turbo-16k" - model = llm_boiler(model_id, openai_key) - - logging.warning(f'Inference via "{model_id}"" for prompt "{prompt}"') - - try: - # run text generation - response = model.run( - prompt, - temperature=1.0, - n_answers=5, - episode_number=episode_number, - ) - - logging.warning(f"Result of text generation: {response}") - return response - - except Exception as e: - # assume it is our error - # just wait and try one more time - print(e) - time.sleep(2) - response = model.run( - prompt, - temperature=1.0, - n_answers=5, - episode_number=episode_number, - ) - logging.warning(f"Result of text generation: {response}") - return response diff --git a/spaces/diacanFperku/AutoGPT/Billa Telugu Movie Bluray Torrent REPACK.md b/spaces/diacanFperku/AutoGPT/Billa Telugu Movie Bluray Torrent REPACK.md deleted file mode 100644 index 6a09034cb0dee9857fdb0c901147289e49a48a00..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Billa Telugu Movie Bluray Torrent REPACK.md +++ /dev/null @@ -1,14 +0,0 @@ -
      -

      Billa: A Stylish Telugu Action Thriller Starring Prabhas and Anushka Shetty

      -

      Billa is a 2009 Telugu action thriller film directed by Meher Ramesh and starring Prabhas, Anushka Shetty, Namitha and Krishnam Raju. It is a remake of the 2007 Tamil film of the same name, which itself was a remake of the 1978 Hindi film Don. The film follows Ranga, a small-time thief who resembles Billa, an international crime lord. When Billa is killed by the police, Ranga is recruited by an Interpol officer to impersonate him and infiltrate his gang. However, things get complicated when Ranga falls in love with Billa's girlfriend and a rival gangster plots to expose his identity.

      -

      Billa was released on 3 April 2009 and received positive reviews from critics and audiences. The film was praised for its stylish presentation, cinematography, music, action sequences and performances. Prabhas and Anushka Shetty were especially lauded for their chemistry and screen presence. The film was also a commercial success, grossing over ₹40 crore at the box office. It was one of the highest-grossing Telugu films of 2009 and established Prabhas as a leading star in the industry.

      -

      billa telugu movie bluray torrent


      Download Zip ✦✦✦ https://gohhs.com/2uFTBh



      -

      If you are a fan of Telugu cinema or action thrillers, you can watch Billa online on Voot[^1^], where it is available in full HD quality with Telugu audio. You can also download Billa in HD quality with Dolby Digital® 5.1 - 448Kbps sound from various torrent sites[^2^]. However, we do not endorse piracy and recommend you to watch the film legally on Voot or other streaming platforms.

      -

      Billa is a film that will keep you hooked with its fast-paced plot, stylish visuals, thrilling action and romantic moments. Don't miss this Telugu blockbuster starring Prabhas and Anushka Shetty!

      - -

      Billa is not just a remake of a Tamil film, but also a tribute to the original Hindi film Don, which starred Amitabh Bachchan in the title role. The film features several references and homages to the classic film, such as the dialogue "Don ko pakadna mushkil hi nahin, namumkin hai" (It is not just difficult to catch Don, it is impossible), the song "Khaike Paan Banaraswala" and the twist ending. The film also pays respect to Krishnam Raju, who played the role of Billa in the 1980 Telugu remake of Don. Krishnam Raju appears in a cameo role as Billa's father in the film.

      -

      Billa also boasts of a stellar soundtrack composed by Mani Sharma, who is known for his work in Telugu and Tamil cinema. The film features six songs, which are sung by prominent singers like Shreya Ghoshal, Ranjith, Naveen Madhav and Hemachandra. The songs range from romantic duets like "Bommali" and "Ne Pattasu" to peppy numbers like "My Name is Billa" and "Ellora Shilpanni". The songs are also well-choreographed and picturized, adding to the appeal of the film.

      -

      Billa is a film that has everything a Telugu movie lover can ask for: action, romance, comedy, drama and suspense. It is a film that showcases the talent and charisma of Prabhas and Anushka Shetty, who have since become one of the most popular pairs in Indian cinema. It is a film that will entertain you from start to finish with its engaging story, stunning visuals, catchy music and thrilling action. It is a film that you should not miss if you are a fan of Telugu cinema or action thrillers.

      -

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/CRACK-Icecream-Screen-Recorder-Pro-576-Activator-CracksMind.md b/spaces/diacanFperku/AutoGPT/CRACK-Icecream-Screen-Recorder-Pro-576-Activator-CracksMind.md deleted file mode 100644 index efec9165f14e4d87acf85cdcdec17962de5cf354..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/CRACK-Icecream-Screen-Recorder-Pro-576-Activator-CracksMind.md +++ /dev/null @@ -1,69 +0,0 @@ -## CRACK Icecream Screen Recorder Pro 5.76 Activator [CracksMind] - - - - ![CRACK Icecream Screen Recorder Pro 5.76 Activator \[CracksMind\]](https://antiqueskishop.com/blog/6-single-default/skier-family.jpg) - - - -**CLICK HERE ⇒⇒⇒ [https://conttooperting.blogspot.com/?l=2twNWK](https://conttooperting.blogspot.com/?l=2twNWK)** - - - -# How to Crack Icecream Screen Recorder Pro 5.76 with Activator [CracksMind] - - - -Icecream Screen Recorder Pro is a powerful and easy-to-use software that allows you to record your screen with audio, annotate and edit your recordings, share them online, and much more. It is one of the best screen recording software for Windows, Mac, and Android. However, it is not free and you need to purchase a license to use all its features. - - - -If you want to crack Icecream Screen Recorder Pro 5.76 and use it for free, you can follow these steps: - - - -1. Download the Icecream Screen Recorder Pro 5.76 setup file from the official website [here](https://icecreamapps.com/Screen-Recorder/). - -2. Install the software on your computer and run it. - -3. Download the Activator [CracksMind] file from [here](https://remcdbcrb.org/wp-content/uploads/2023/01/CRACK-Icecream-Screen-Recorder-Pro-576-Activator-CracksMind-FULL.pdf). This file contains the crack and instructions on how to use it. - -4. Extract the Activator [CracksMind] file and run the crack.exe file as administrator. - -5. Select Icecream Screen Recorder Pro from the list of programs and click on Activate. - -6. Wait for the activation process to complete and close the crack.exe file. - -7. Enjoy using Icecream Screen Recorder Pro 5.76 with all its features unlocked. - - - -Note: This method is only for educational purposes and we do not recommend or support cracking any software. Cracking software may violate the terms of service and may expose your computer to malware or viruses. Please purchase a license from the official website if you like the software and want to support the developers. - - - -Now that you have cracked Icecream Screen Recorder Pro 5.76, you can enjoy its amazing features. Here are some of the things you can do with this software: - - - -- Record full screen or any area of your screen with high quality and no watermark. - -- Record webcam and microphone along with your screen to create engaging videos. - -- Draw, add text, arrows, shapes, and other annotations to your recordings in real time. - -- Trim, crop, rotate, speed up, slow down, mute, and add effects to your videos with the built-in video editor. - -- Convert your videos to MP4, AVI, MOV, WEBM formats with H264, MPEG4, VP8 codecs. - -- Schedule your screen recordings to start and stop automatically at a specific time and date. - -- Add your own logo or watermark to your videos to protect your intellectual property. - -- Upload your videos to the Icecream Apps cloud storage and share them online with anyone. 
- - - -Icecream Screen Recorder Pro 5.76 is a versatile and user-friendly software that can help you create professional-looking screen recordings for various purposes. Whether you want to record a webinar, a tutorial, a game, a presentation, or anything else on your screen, this software can handle it. However, cracking software is illegal and unethical and may harm your computer and data. Therefore, we strongly advise you to buy a license from the official website and support the developers of this amazing software. - - 1b8d091108 \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Choices Pre-intermediate Teachers Book Free Download ((FULL)).md b/spaces/diacanFperku/AutoGPT/Choices Pre-intermediate Teachers Book Free Download ((FULL)).md deleted file mode 100644 index 354e8e7fdaf163a8c8b462c0c9c26c6dde18ddb0..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Choices Pre-intermediate Teachers Book Free Download ((FULL)).md +++ /dev/null @@ -1,6 +0,0 @@ -

      Choices Pre-intermediate Teacher's Book Free Download


      Download File === https://gohhs.com/2uFSY4



      -
      -Choices Pre-intermediate Teacher's Book - Free download Ebook, ... choices pre intermediate students book or read online books in PDF, ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/diacanFperku/AutoGPT/FULL Kong.Audio.ChineeGuanZi.VSTi.v1.31-ASSiGN !!EXCLUSIVE!!.md b/spaces/diacanFperku/AutoGPT/FULL Kong.Audio.ChineeGuanZi.VSTi.v1.31-ASSiGN !!EXCLUSIVE!!.md deleted file mode 100644 index 1dccf8117cef73cc9e63c981c833ea33285ab567..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/FULL Kong.Audio.ChineeGuanZi.VSTi.v1.31-ASSiGN !!EXCLUSIVE!!.md +++ /dev/null @@ -1,5 +0,0 @@ - -

      Plug-ins: INSTRUMENTA MUSIC Library, Second Life, VSTi, VST, SoundFont2. Kong.Audio.Chineeguanzi.v1.1.0.READ.NFO-Kong.Audio.ChineeGuanZi.VSTi.v1.1.zip. zip.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.vSTANDARD. Kong.Audio.Chineeguanzi.v1.0.zip. Kong.Audio.Chineeguanzi.v1.0.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.vSTANDARD. Kong.Audio.Chineeguanzi.v1.0.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.vSTANDARD. Kong.Audio.Chineeguanzi.v1.0.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.vSTANDARD. Kong.Audio.Chineeguanzi.v1.0.zip. FULL Kong.Audio.ChineeGuanZi.VSTi.v1.31-ASSiGN

      Downloads: Kong.Audio.Kood.VST.zip (VST STANDARD) http://www.coub.com/downloads/kong.audio.kood.vst.rar.zip.png Kong.Audio.Kood.VST.zip.png Instruments: VSTi and Plugin Instruments and Synthesizers: VSTi and Plugin. Kong.Audio.Chineeguanzi.VSTi.v1.0.zip. Assign. Kong.Audio.Chineeguanzi.VSTi.v1.0.zip. Kong.Audio.Chineeguanzi.VSTi.v1.0.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. FULL Kong.Audio.ChineeGuanZi.VSTi.v1.31-ASSiGN

      from Vincent Zhou (original) Kong.Audio.Chineeguanzi.v1.0.zip. Kong.Audio.Chineeguanzi.v1.0.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. Kong.Audio.Kood.VST.zip. FULL Kong.Audio.ChineeGuanZi.

      -

      FULL Kong.Audio.ChineeGuanZi.VSTi.v1.31-ASSiGN


      Download === https://gohhs.com/2uFUvf



      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Re Loader Activator Download Free ((FREE)).md b/spaces/diacanFperku/AutoGPT/Re Loader Activator Download Free ((FREE)).md deleted file mode 100644 index 0853308eabfa581faaacd2d852e733ef128ccbbe..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Re Loader Activator Download Free ((FREE)).md +++ /dev/null @@ -1,30 +0,0 @@ -
      -

      How to Activate Windows and Office with Re Loader Activator

      -

      If you are looking for a simple and effective way to activate your Windows or Office products, you may want to try Re Loader Activator. This is a free tool that can activate any version of Windows and Office with just one click. In this article, we will show you how to download and use Re Loader Activator to get your software up and running.

      -

      Re Loader Activator Download Free


      Download File >>> https://gohhs.com/2uFUg2



      -

      What is Re Loader Activator?

      -

      Re Loader Activator is a software that can activate various Microsoft products, such as Windows 7/8/10/11, Office 2010/2013/2016, and server platforms like 2008 R2 / 2012 R2 / 2016. It works by using license keys from official sources like Microsoft itself or genuine product keys purchased from authorized retailers. It does not require any special hardware or internet connection to activate your software. It also supports both 32-bit and 64-bit versions of Windows and Office.

      -

      Re Loader Activator has been developed by the Re-Loader Team, who have previously released several activators for different versions of Windows. The latest version (v3.0) contains advanced features that make it easier to use than ever before and gives users an unlimited number of free activations. Re Loader Activator stands out from other similar tools due to its simplicity and reliability. It does not modify any system files or registry entries, making it safe and secure to use.

      -

      How to Download Re Loader Activator?

      -

      You can download Re Loader Activator for free from various online sources. However, you should be careful about the source you choose, as some websites may contain malware or viruses that can harm your computer. We recommend you to download Re Loader Activator from the official website[^1^] or from a trusted source like Google Drive[^3^]. Here are the steps to download Re Loader Activator:

      -
        -
      1. Go to the official website[^1^] or Google Drive[^3^] link and click on the download button.
      2. -
      3. Save the file (Re-Loader-Activator-v3.0.zip) on your computer and extract it using a program like WinRAR or 7-Zip.
      4. -
      5. Open the extracted folder and run the executable file (Re-Loader-Activator-v3.0.exe) as administrator.
      6. -
      -

      How to Use Re Loader Activator?

      -

      Using Re Loader Activator is very easy and straightforward. You don't need any technical knowledge or experience to operate it successfully. Here are the steps to use Re Loader Activator:

      -

      -
        -
      1. After running the executable file, you will see a window with various options for activating Windows and Office.
      2. -
      3. Select the product you want to activate (Windows or Office) and click on the green button below it.
      4. -
      5. Wait for a few seconds while Re Loader Activator detects your version of Windows or Office and automatically selects the correct product keys needed for activation.
      6. -
      7. If the activation is successful, you will see a message saying "Activation Done!" and a green check mark next to your product.
      8. -
      9. If the activation fails due to some technical issue, you can try again with another set of product keys provided by Re Loader Activator or manually enter them yourself if you have them.
      10. -
      11. You can also reset your trial versions of Windows or Office back to their original state by clicking on the red button below them.
      12. -
      13. Close the program and restart your computer for the changes to take effect.
      14. -
      -

      Conclusion

      -

      Re Loader Activator is a powerful and efficient tool for activating any version of Windows and Office with just one click. It is free, safe, and easy to use. It can save you time and money by avoiding buying expensive license keys from Microsoft. If you want to enjoy your Windows or Office software without any limitations, you should give Re Loader Activator a try.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/digitalxingtong/Shanbao-Bert-VITS2/text/english_bert_mock.py b/spaces/digitalxingtong/Shanbao-Bert-VITS2/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Shanbao-Bert-VITS2/text/english_bert_mock.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git a/spaces/dilums/sentence-similarity/components/ui/table.tsx b/spaces/dilums/sentence-similarity/components/ui/table.tsx deleted file mode 100644 index eb17c5a0447cfe22bdeaa1f2a3690499c4552463..0000000000000000000000000000000000000000 --- a/spaces/dilums/sentence-similarity/components/ui/table.tsx +++ /dev/null @@ -1,117 +0,0 @@ -import * as React from "react" - -import { cn } from "@/lib/utils" - -const Table = React.forwardRef< - HTMLTableElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
      - - -)) -Table.displayName = "Table" - -const TableHeader = React.forwardRef< - HTMLTableSectionElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( - -)) -TableHeader.displayName = "TableHeader" - -const TableBody = React.forwardRef< - HTMLTableSectionElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( - -)) -TableBody.displayName = "TableBody" - -const TableFooter = React.forwardRef< - HTMLTableSectionElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( - -)) -TableFooter.displayName = "TableFooter" - -const TableRow = React.forwardRef< - HTMLTableRowElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( - -)) -TableRow.displayName = "TableRow" - -const TableHead = React.forwardRef< - HTMLTableCellElement, - React.ThHTMLAttributes ->(({ className, ...props }, ref) => ( -
      [role=checkbox]]:translate-y-[2px]", - className - )} - {...props} - /> -)) -TableHead.displayName = "TableHead" - -const TableCell = React.forwardRef< - HTMLTableCellElement, - React.TdHTMLAttributes ->(({ className, ...props }, ref) => ( - [role=checkbox]]:translate-y-[2px]", - className - )} - {...props} - /> -)) -TableCell.displayName = "TableCell" - -const TableCaption = React.forwardRef< - HTMLTableCaptionElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
      -)) -TableCaption.displayName = "TableCaption" - -export { - Table, - TableHeader, - TableBody, - TableFooter, - TableHead, - TableRow, - TableCell, - TableCaption, -} diff --git a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/spaces/dineshreddy/WALT/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py deleted file mode 100644 index af12cb06786ce17df331ac74e41b563b294387c0..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py +++ /dev/null @@ -1,531 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Conv2d, ConvModule, build_upsample_layer -from mmcv.ops.carafe import CARAFEPack -from mmcv.runner import auto_fp16, force_fp32 -from torch.nn.modules.utils import _pair - -from mmdet.core import mask_target -from mmdet.models.builder import HEADS, build_loss - -BYTES_PER_FLOAT = 4 -# TODO: This memory limit may be too much or too little. It would be better to -# determine it based on available resources. -GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit - - -@HEADS.register_module() -class FCNMaskHead(nn.Module): - - def __init__(self, - num_convs=4, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - conv_out_channels=256, - num_classes=80, - class_agnostic=False, - upsample_cfg=dict(type='deconv', scale_factor=2), - conv_cfg=None, - norm_cfg=None, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)): - super(FCNMaskHead, self).__init__() - self.upsample_cfg = upsample_cfg.copy() - if self.upsample_cfg['type'] not in [ - None, 'deconv', 'nearest', 'bilinear', 'carafe' - ]: - raise ValueError( - f'Invalid upsample method {self.upsample_cfg["type"]}, ' - 'accepted methods are "deconv", "nearest", "bilinear", ' - '"carafe"') - self.num_convs = num_convs - # WARN: roi_feat_size is reserved and not used - self.roi_feat_size = _pair(roi_feat_size) - self.in_channels = in_channels - self.conv_kernel_size = conv_kernel_size - self.conv_out_channels = conv_out_channels - self.upsample_method = self.upsample_cfg.get('type') - self.scale_factor = self.upsample_cfg.pop('scale_factor', None) - self.num_classes = num_classes - self.class_agnostic = class_agnostic - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.fp16_enabled = False - self.loss_mask = build_loss(loss_mask) - - self.convs = nn.ModuleList() - for i in range(self.num_convs): - in_channels = ( - self.in_channels if i == 0 else self.conv_out_channels) - padding = (self.conv_kernel_size - 1) // 2 - self.convs.append( - ConvModule( - in_channels, - self.conv_out_channels, - self.conv_kernel_size, - padding=padding, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg)) - upsample_in_channels = ( - self.conv_out_channels if self.num_convs > 0 else in_channels) - upsample_cfg_ = self.upsample_cfg.copy() - if self.upsample_method is None: - self.upsample = None - elif self.upsample_method == 'deconv': - upsample_cfg_.update( - in_channels=upsample_in_channels, - out_channels=self.conv_out_channels, - kernel_size=self.scale_factor, - stride=self.scale_factor) - self.upsample = build_upsample_layer(upsample_cfg_) - elif self.upsample_method == 'carafe': - upsample_cfg_.update( - channels=upsample_in_channels, scale_factor=self.scale_factor) - self.upsample = build_upsample_layer(upsample_cfg_) - else: - # suppress warnings - align_corners = (None - if self.upsample_method == 'nearest' else False) - upsample_cfg_.update( - 
scale_factor=self.scale_factor, - mode=self.upsample_method, - align_corners=align_corners) - self.upsample = build_upsample_layer(upsample_cfg_) - - out_channels = 1 if self.class_agnostic else self.num_classes - logits_in_channel = ( - self.conv_out_channels - if self.upsample_method == 'deconv' else upsample_in_channels) - self.conv_logits = Conv2d(logits_in_channel, out_channels, 1) - self.relu = nn.ReLU(inplace=True) - self.debug_imgs = None - - def init_weights(self): - for m in [self.upsample, self.conv_logits]: - if m is None: - continue - elif isinstance(m, CARAFEPack): - m.init_weights() - else: - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu') - nn.init.constant_(m.bias, 0) - - @auto_fp16() - def forward(self, x): - for conv in self.convs: - x = conv(x) - if self.upsample is not None: - x = self.upsample(x) - if self.upsample_method == 'deconv': - x = self.relu(x) - mask_pred = self.conv_logits(x) - return mask_pred - - def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): - pos_proposals = [res.pos_bboxes for res in sampling_results] - pos_assigned_gt_inds = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, - gt_masks, rcnn_train_cfg) - return mask_targets - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, mask_targets, labels): - """ - Example: - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. - >>> # There are lots of variations depending on the configuration - >>> self = FCNMaskHead(num_classes=C, num_convs=1) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_pred = self.forward(inputs) - >>> sf = self.scale_factor - >>> labels = torch.randint(0, C, size=(N,)) - >>> # With the default properties the mask targets should indicate - >>> # a (potentially soft) single-class label - >>> mask_targets = torch.rand(N, H * sf, W * sf) - >>> loss = self.loss(mask_pred, mask_targets, labels) - >>> print('loss = {!r}'.format(loss)) - """ - loss = dict() - if mask_pred.size(0) == 0: - loss_mask = mask_pred.sum() - else: - if self.class_agnostic: - loss_mask = self.loss_mask(mask_pred, mask_targets, - torch.zeros_like(labels)) - else: - #print(mask_pred[:,0:1].shape, mask_targets[0::2].shape, labels.shape) - loss_mask_vis = self.loss_mask(mask_pred[:,0:1], mask_targets[0::2], labels) - loss_mask_full = self.loss_mask(mask_pred[:,1:2], mask_targets[1::2], labels) - loss['loss_mask_vis'] = loss_mask_vis - loss['loss_mask_full'] = loss_mask_full - return loss - - def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, - ori_shape, scale_factor, rescale): - """Get segmentation masks from mask_pred and bboxes. - Args: - mask_pred (Tensor or ndarray): shape (n, #class, h, w). - For single-scale testing, mask_pred is the direct output of - model, whose type is Tensor, while for multi-scale testing, - it will be converted to numpy array outside of this method. - det_bboxes (Tensor): shape (n, 4/5) - det_labels (Tensor): shape (n, ) - rcnn_test_cfg (dict): rcnn testing config - ori_shape (Tuple): original image height and width, shape (2,) - scale_factor(float | Tensor): If ``rescale is True``, box - coordinates are divided by this scale factor to fit - ``ori_shape``. - rescale (bool): If True, the resulting masks will be rescaled to - ``ori_shape``. - Returns: - list[list]: encoded masks. 
The c-th item in the outer list - corresponds to the c-th class. Given the c-th outer list, the - i-th item in that inner list is the mask for the i-th box with - class label c. - Example: - >>> import mmcv - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. - >>> self = FCNMaskHead(num_classes=C, num_convs=0) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_pred = self.forward(inputs) - >>> # Each input is associated with some bounding box - >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N) - >>> det_labels = torch.randint(0, C, size=(N,)) - >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, }) - >>> ori_shape = (H * 4, W * 4) - >>> scale_factor = torch.FloatTensor((1, 1)) - >>> rescale = False - >>> # Encoded masks are a list for each category. - >>> encoded_masks = self.get_seg_masks( - >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, - >>> scale_factor, rescale - >>> ) - >>> assert len(encoded_masks) == C - >>> assert sum(list(map(len, encoded_masks))) == N - """ - if isinstance(mask_pred, torch.Tensor): - mask_pred = mask_pred.sigmoid() - else: - mask_pred = det_bboxes.new_tensor(mask_pred) - - device = mask_pred.device - cls_segms = [[] for _ in range(self.num_classes) - ] # BG is not included in num_classes - bboxes = det_bboxes[:, :4] - labels = det_labels - - if rescale: - img_h, img_w = ori_shape[:2] - else: - if isinstance(scale_factor, float): - img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32) - img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32) - else: - w_scale, h_scale = scale_factor[0], scale_factor[1] - img_h = np.round(ori_shape[0] * h_scale.item()).astype( - np.int32) - img_w = np.round(ori_shape[1] * w_scale.item()).astype( - np.int32) - scale_factor = 1.0 - - if not isinstance(scale_factor, (float, torch.Tensor)): - scale_factor = bboxes.new_tensor(scale_factor) - bboxes = bboxes / scale_factor - - if torch.onnx.is_in_onnx_export(): - # TODO: Remove after F.grid_sample is supported. - from torchvision.models.detection.roi_heads \ - import paste_masks_in_image - masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2]) - thr = rcnn_test_cfg.get('mask_thr_binary', 0) - if thr > 0: - masks = masks >= thr - return masks - - N = len(mask_pred) - # The actual implementation split the input into chunks, - # and paste them chunk by chunk. - if device.type == 'cpu': - # CPU is most efficient when they are pasted one by one with - # skip_empty=True, so that it performs minimal number of - # operations. 
- num_chunks = N - else: - # GPU benefits from parallelism for larger chunks, - # but may have memory issue - num_chunks = int( - np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) - assert (num_chunks <= - N), 'Default GPU_MEM_LIMIT is too small; try increasing it' - chunks = torch.chunk(torch.arange(N, device=device), num_chunks) - - threshold = rcnn_test_cfg.mask_thr_binary - im_mask = torch.zeros( - N, - img_h, - img_w, - device=device, - dtype=torch.bool if threshold >= 0 else torch.uint8) - - if not self.class_agnostic: - mask_pred = mask_pred[range(N), labels][:, None] - - for inds in chunks: - masks_chunk, spatial_inds = _do_paste_mask( - mask_pred[inds], - bboxes[inds], - img_h, - img_w, - skip_empty=device.type == 'cpu') - - if threshold >= 0: - masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) - else: - # for visualization and debugging - masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) - - im_mask[(inds, ) + spatial_inds] = masks_chunk - - for i in range(N): - cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy()) - return cls_segms - - def get_seg_masks1(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, - ori_shape, scale_factor, rescale): - """Get segmentation masks from mask_pred and bboxes. - - Args: - mask_pred (Tensor or ndarray): shape (n, #class, h, w). - For single-scale testing, mask_pred is the direct output of - model, whose type is Tensor, while for multi-scale testing, - it will be converted to numpy array outside of this method. - det_bboxes (Tensor): shape (n, 4/5) - det_labels (Tensor): shape (n, ) - rcnn_test_cfg (dict): rcnn testing config - ori_shape (Tuple): original image height and width, shape (2,) - scale_factor(float | Tensor): If ``rescale is True``, box - coordinates are divided by this scale factor to fit - ``ori_shape``. - rescale (bool): If True, the resulting masks will be rescaled to - ``ori_shape``. - - Returns: - list[list]: encoded masks. The c-th item in the outer list - corresponds to the c-th class. Given the c-th outer list, the - i-th item in that inner list is the mask for the i-th box with - class label c. - - Example: - >>> import mmcv - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. - >>> self = FCNMaskHead(num_classes=C, num_convs=0) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_pred = self.forward(inputs) - >>> # Each input is associated with some bounding box - >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N) - >>> det_labels = torch.randint(0, C, size=(N,)) - >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, }) - >>> ori_shape = (H * 4, W * 4) - >>> scale_factor = torch.FloatTensor((1, 1)) - >>> rescale = False - >>> # Encoded masks are a list for each category. 
- >>> encoded_masks = self.get_seg_masks( - >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, - >>> scale_factor, rescale - >>> ) - >>> assert len(encoded_masks) == C - >>> assert sum(list(map(len, encoded_masks))) == N - """ - if isinstance(mask_pred, torch.Tensor): - mask_pred = mask_pred.sigmoid() - else: - mask_pred = det_bboxes.new_tensor(mask_pred) - - device = mask_pred.device - cls_segms = [[] for _ in range(self.num_classes) - ] # BG is not included in num_classes - bboxes = det_bboxes[:, :4] - labels = det_labels - labels = torch.cat((labels, torch.tensor(([1])))) - bboxes = torch.cat((bboxes, bboxes)) - #print(labels,torch.tensor(([1]))) - #asas - - if rescale: - img_h, img_w = ori_shape[:2] - else: - if isinstance(scale_factor, float): - img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32) - img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32) - else: - w_scale, h_scale = scale_factor[0], scale_factor[1] - img_h = np.round(ori_shape[0] * h_scale.item()).astype( - np.int32) - img_w = np.round(ori_shape[1] * w_scale.item()).astype( - np.int32) - scale_factor = 1.0 - - if not isinstance(scale_factor, (float, torch.Tensor)): - scale_factor = bboxes.new_tensor(scale_factor) - bboxes = bboxes / scale_factor - - if torch.onnx.is_in_onnx_export(): - # TODO: Remove after F.grid_sample is supported. - from torchvision.models.detection.roi_heads \ - import paste_masks_in_image - masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2]) - thr = rcnn_test_cfg.get('mask_thr_binary', 0) - if thr > 0: - masks = masks >= thr - return masks - - N = len(mask_pred) - # The actual implementation split the input into chunks, - # and paste them chunk by chunk. - if device.type == 'cpu': - # CPU is most efficient when they are pasted one by one with - # skip_empty=True, so that it performs minimal number of - # operations. - num_chunks = N - else: - # GPU benefits from parallelism for larger chunks, - # but may have memory issue - num_chunks = int( - np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) - assert (num_chunks <= - N), 'Default GPU_MEM_LIMIT is too small; try increasing it' - chunks = torch.chunk(torch.arange(N, device=device), num_chunks) - - threshold = rcnn_test_cfg.mask_thr_binary - im_mask = torch.zeros( - N, - img_h, - img_w, - device=device, - dtype=torch.bool if threshold >= 0 else torch.uint8) - - if not self.class_agnostic: - mask_pred = mask_pred[range(N), labels][:, None] - #print('-----------------------------') - #print(chunks) - - for inds in chunks: - #print(mask_pred[inds].shape, bboxes[inds].shape) - masks_chunk, spatial_inds = _do_paste_mask( - mask_pred[0:1], - bboxes[inds], - img_h, - img_w, - skip_empty=device.type == 'cpu') - masks_chunk_occ, spatial_inds_occ = _do_paste_mask( - mask_pred[1:2], - bboxes[inds], - img_h, - img_w, - skip_empty=device.type == 'cpu') - - - if threshold >= 0: - masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) - masks_chunk_occ = (masks_chunk_occ >= threshold).to(dtype=torch.bool) - else: - # for visualization and debugging - masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) - - im_mask[([0], ) + spatial_inds] = masks_chunk - im_mask[([1], ) + spatial_inds] = masks_chunk_occ - - - for i in range(N): - cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy()) - #print(cls_segms) - return cls_segms - - -def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): - """Paste instance masks according to boxes. 
- - This implementation is modified from - https://github.com/facebookresearch/detectron2/ - - Args: - masks (Tensor): N, 1, H, W - boxes (Tensor): N, 4 - img_h (int): Height of the image to be pasted. - img_w (int): Width of the image to be pasted. - skip_empty (bool): Only paste masks within the region that - tightly bound all boxes, and returns the results this region only. - An important optimization for CPU. - - Returns: - tuple: (Tensor, tuple). The first item is mask tensor, the second one - is the slice object. - If skip_empty == False, the whole image will be pasted. It will - return a mask of shape (N, img_h, img_w) and an empty tuple. - If skip_empty == True, only area around the mask will be pasted. - A mask of shape (N, h', w') and its start and end coordinates - in the original image will be returned. - """ - # On GPU, paste all masks together (up to chunk size) - # by using the entire image to sample the masks - # Compared to pasting them one by one, - # this has more operations but is faster on COCO-scale dataset. - device = masks.device - if skip_empty: - x0_int, y0_int = torch.clamp( - boxes.min(dim=0).values.floor()[:2] - 1, - min=0).to(dtype=torch.int32) - x1_int = torch.clamp( - boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) - y1_int = torch.clamp( - boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) - else: - x0_int, y0_int = 0, 0 - x1_int, y1_int = img_w, img_h - x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 - - N = masks.shape[0] - - img_y = torch.arange( - y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 - img_x = torch.arange( - x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 - img_y = (img_y - y0) / (y1 - y0) * 2 - 1 - img_x = (img_x - x0) / (x1 - x0) * 2 - 1 - # img_x, img_y have shapes (N, w), (N, h) - if torch.isinf(img_x).any(): - inds = torch.where(torch.isinf(img_x)) - img_x[inds] = 0 - if torch.isinf(img_y).any(): - inds = torch.where(torch.isinf(img_y)) - img_y[inds] = 0 - - gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) - gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) - grid = torch.stack([gx, gy], dim=3) - - if torch.onnx.is_in_onnx_export(): - raise RuntimeError( - 'Exporting F.grid_sample from Pytorch to ONNX is not supported.') - img_masks = F.grid_sample( - masks.to(dtype=torch.float32), grid, align_corners=False) - - if skip_empty: - return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) - else: - return img_masks[:, 0], () diff --git a/spaces/doluvor/faster-whisper-webui/LICENSE.md b/spaces/doluvor/faster-whisper-webui/LICENSE.md deleted file mode 100644 index f5f4b8b5ecd27c09e4ef16e9662bcb7bb2bfc76f..0000000000000000000000000000000000000000 --- a/spaces/doluvor/faster-whisper-webui/LICENSE.md +++ /dev/null @@ -1,195 +0,0 @@ -Apache License -============== - -_Version 2.0, January 2004_ -_<>_ - -### Terms and Conditions for use, reproduction, and distribution - -#### 1. Definitions - -“License” shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -“Licensor” shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -“Legal Entity” shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. 
-For the purposes of this definition, “control” means **(i)** the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the -outstanding shares, or **(iii)** beneficial ownership of such entity. - -“You” (or “Your”) shall mean an individual or Legal Entity exercising -permissions granted by this License. - -“Source” form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -“Object” form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -“Work” shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -“Derivative Works” shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -“Contribution” shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -“submitted” means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as “Not a Contribution.” - -“Contributor” shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -#### 2. Grant of Copyright License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -#### 3. 
Grant of Patent License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -#### 4. Redistribution - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -* **(a)** You must give any other recipients of the Work or Derivative Works a copy of -this License; and -* **(b)** You must cause any modified files to carry prominent notices stating that You -changed the files; and -* **(c)** You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. - -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -#### 5. Submission of Contributions - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -#### 6. 
Trademarks - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -#### 7. Disclaimer of Warranty - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -#### 8. Limitation of Liability - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -#### 9. Accepting Warranty or Additional Liability - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -_END OF TERMS AND CONDITIONS_ - -### APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets `[]` replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same “printed page” as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/spaces/dpaulsoria/AnimalDetector/app.py b/spaces/dpaulsoria/AnimalDetector/app.py deleted file mode 100644 index 684252645da2ea6584d421c0061909dd3d08ef8b..0000000000000000000000000000000000000000 --- a/spaces/dpaulsoria/AnimalDetector/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import gradio as gr -import cv2 - -from ultralytics import YOLO - -examples = [ - ['./1.JPG'], - ['./2.JPG'], - ['./3.JPG'], - # ['./4.JPG'], - ['./5.JPG'], - ['./6.JPG'], -] - - -#Cargar modelo entrenado -model = YOLO('best.pt') - -#Definir funcion que ejecuta la interfaz definida (en este caso es solo una interfaz, pero pueden ser algunas) -#La interfaz solo recibe una entrada (La imagen ingresada en el cargador de path de imagenes), por lo -# q ue solo se define un parametro de entrada en la funcion. -def show_results(loaded_image): - #Se generan las salidas (detecciones) pidiendo al modelo que prediga a partir de la imagen de entrada - outputs = model.predict(source=loaded_image) - results = outputs[0].cpu().numpy() - #Se carga la imagen usando openCV para poder editarla - image = cv2.imread(loaded_image) - #Se recorre cada boundingBox detectado y para cada uno se pinta un rectangulo y se escribe un id. - for i, det in enumerate(results.boxes.xyxy): - cv2.rectangle(image, - (int(det[0]), int(det[1])), - (int(det[2]), int(det[3])), - color=(0, 0, 255), - thickness=2, - lineType=cv2.LINE_AA - ) - cv2.putText(image, - text =f"id:{i}", - org=(int(det[0]), int(det[1])), - fontFace =cv2.FONT_HERSHEY_SIMPLEX, - fontScale=1, - color=(0,0,255), - thickness=1, - lineType=cv2.LINE_AA - ) - #Se retornan las 2 salidas definidas(imagen y texto): la imagen resultante (image) y un texto indicando cuantos boundingBox se encontraron - return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), len(results.boxes) - - -inputs = [gr.components.Image(type="filepath", label="Input Image"), - ] -outputs= [gr.components.Image(type="numpy", label="Output Image"), - gr.Textbox(label="Total:") - ] - -interface = gr.Interface(fn=show_results, - inputs=inputs, - outputs=outputs, - title="Animal Detector", - #En la interfaz se pueden incluir ejemplos de lo que se espera como entrada o entradas. 
En este caso, - # la entrada es una imagen por lo que se pueden poner imagenes de ejemplo (deben estar subidas en el repositorio - # y con el path correctamente referenciado) - examples=examples, - ) - -interface.launch() diff --git a/spaces/dumitrescustefan/romanian-text-generation/README.md b/spaces/dumitrescustefan/romanian-text-generation/README.md deleted file mode 100644 index 8b995bb499968a067e6e784577d1ecec41490410..0000000000000000000000000000000000000000 --- a/spaces/dumitrescustefan/romanian-text-generation/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Romanian Text Generation -emoji: 😻 -colorFrom: indigo -colorTo: blue -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/elozano/news-analyzer/analyzer.py b/spaces/elozano/news-analyzer/analyzer.py deleted file mode 100644 index ed0103b0803d629310f7b4843771fd49ea83d007..0000000000000000000000000000000000000000 --- a/spaces/elozano/news-analyzer/analyzer.py +++ /dev/null @@ -1,70 +0,0 @@ -from typing import Dict, Optional, Union - -from transformers import ( - AutoModelForSequenceClassification, - AutoModelForTokenClassification, - BertTokenizer, - AutoTokenizer, - TokenClassificationPipeline, -) - -from pipeline import NewsPipeline - -CATEGORY_EMOJIS = { - "Automobile": "🚗", - "Entertainment": "🍿", - "Politics": "⚖️", - "Science": "🧪", - "Sports": "🏀", - "Technology": "💻", - "World": "🌍", -} -FAKE_EMOJIS = {"Fake": "👻", "Real": "👍"} -CLICKBAIT_EMOJIS = {"Clickbait": "🎣", "Normal": "✅"} - - -class NewsAnalyzer: - def __init__( - self, - category_model_name: str, - fake_model_name: str, - clickbait_model_name: str, - ner_model_name: str, - ) -> None: - self.category_pipe = NewsPipeline( - model=AutoModelForSequenceClassification.from_pretrained( - category_model_name - ), - tokenizer=BertTokenizer.from_pretrained(category_model_name), - emojis=CATEGORY_EMOJIS, - ) - self.fake_pipe = NewsPipeline( - model=AutoModelForSequenceClassification.from_pretrained(fake_model_name), - tokenizer=BertTokenizer.from_pretrained(fake_model_name), - emojis=FAKE_EMOJIS, - ) - self.clickbait_pipe = NewsPipeline( - model=AutoModelForSequenceClassification.from_pretrained( - clickbait_model_name - ), - tokenizer=BertTokenizer.from_pretrained(clickbait_model_name), - emojis=CLICKBAIT_EMOJIS, - ) - self.ner_pipe = TokenClassificationPipeline( - model=AutoModelForTokenClassification.from_pretrained(ner_model_name), - tokenizer=AutoTokenizer.from_pretrained(ner_model_name), - aggregation_strategy="simple", - ) - - def __call__( - self, headline: str, content: Optional[str] = None - ) -> Dict[str, Union[str, float]]: - return { - "category": self.category_pipe(headline=headline, content=content), - "fake": self.fake_pipe(headline=headline, content=content), - "clickbait": self.clickbait_pipe(headline=headline, content=None), - "ner": { - "headline": self.ner_pipe(headline), - "content": self.ner_pipe(content) if content else None, - }, - } diff --git a/spaces/enesbol/case_dif/trainer.py b/spaces/enesbol/case_dif/trainer.py deleted file mode 100644 index 73b62b243f1d4dfce563592c2914b27f6e788251..0000000000000000000000000000000000000000 --- a/spaces/enesbol/case_dif/trainer.py +++ /dev/null @@ -1,293 +0,0 @@ -""" -author: Min Seok Lee and Wooseok Shin -""" -import os -import cv2 -import time -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from tqdm import tqdm -from 
dataloader import get_train_augmentation, get_test_augmentation, get_loader, gt_to_tensor -from util.utils import AvgMeter -from util.metrics import Evaluation_metrics -from util.losses import Optimizer, Scheduler, Criterion -from model.TRACER import TRACER - - -class Trainer(): - def __init__(self, args, save_path): - super(Trainer, self).__init__() - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - self.size = args.img_size - - self.tr_img_folder = os.path.join(args.data_path, args.dataset, 'Train/images/') - self.tr_gt_folder = os.path.join(args.data_path, args.dataset, 'Train/masks/') - self.tr_edge_folder = os.path.join(args.data_path, args.dataset, 'Train/edges/') - - self.train_transform = get_train_augmentation(img_size=args.img_size, ver=args.aug_ver) - self.test_transform = get_test_augmentation(img_size=args.img_size) - - self.train_loader = get_loader(self.tr_img_folder, self.tr_gt_folder, self.tr_edge_folder, phase='train', - batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, - transform=self.train_transform, seed=args.seed) - self.val_loader = get_loader(self.tr_img_folder, self.tr_gt_folder, self.tr_edge_folder, phase='val', - batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, - transform=self.test_transform, seed=args.seed) - - # Network - self.model = TRACER(args).to(self.device) - - if args.multi_gpu: - self.model = nn.DataParallel(self.model).to(self.device) - - # Loss and Optimizer - self.criterion = Criterion(args) - self.optimizer = Optimizer(args, self.model) - self.scheduler = Scheduler(args, self.optimizer) - - # Train / Validate - min_loss = 1000 - early_stopping = 0 - t = time.time() - for epoch in range(1, args.epochs + 1): - self.epoch = epoch - train_loss, train_mae = self.training(args) - val_loss, val_mae = self.validate() - - if args.scheduler == 'Reduce': - self.scheduler.step(val_loss) - else: - self.scheduler.step() - - # Save models - if val_loss < min_loss: - early_stopping = 0 - best_epoch = epoch - best_mae = val_mae - min_loss = val_loss - torch.save(self.model.state_dict(), os.path.join(save_path, 'best_model.pth')) - print(f'-----------------SAVE:{best_epoch}epoch----------------') - else: - early_stopping += 1 - - if early_stopping == args.patience + 5: - break - - print(f'\nBest Val Epoch:{best_epoch} | Val Loss:{min_loss:.3f} | Val MAE:{best_mae:.3f} ' - f'time: {(time.time() - t) / 60:.3f}M') - - # Test time - datasets = ['DUTS', 'DUT-O', 'HKU-IS', 'ECSSD', 'PASCAL-S'] - for dataset in datasets: - args.dataset = dataset - test_loss, test_mae, test_maxf, test_avgf, test_s_m = self.test(args, os.path.join(save_path)) - - print( - f'Test Loss:{test_loss:.3f} | MAX_F:{test_maxf:.3f} | AVG_F:{test_avgf:.3f} | MAE:{test_mae:.3f} ' - f'| S_Measure:{test_s_m:.3f}, time: {time.time() - t:.3f}s') - - end = time.time() - print(f'Total Process time:{(end - t) / 60:.3f}Minute') - - def training(self, args): - self.model.train() - train_loss = AvgMeter() - train_mae = AvgMeter() - - for images, masks, edges in tqdm(self.train_loader): - images = torch.tensor(images, device=self.device, dtype=torch.float32) - masks = torch.tensor(masks, device=self.device, dtype=torch.float32) - edges = torch.tensor(edges, device=self.device, dtype=torch.float32) - - self.optimizer.zero_grad() - outputs, edge_mask, ds_map = self.model(images) - loss1 = self.criterion(outputs, masks) - loss2 = self.criterion(ds_map[0], masks) - loss3 = self.criterion(ds_map[1], masks) - loss4 = self.criterion(ds_map[2], 
masks) - - loss_mask = self.criterion(edge_mask, edges) - loss = loss1 + loss2 + loss3 + loss4 + loss_mask - - loss.backward() - nn.utils.clip_grad_norm_(self.model.parameters(), args.clipping) - self.optimizer.step() - - # Metric - mae = torch.mean(torch.abs(outputs - masks)) - - # log - train_loss.update(loss.item(), n=images.size(0)) - train_mae.update(mae.item(), n=images.size(0)) - - print(f'Epoch:[{self.epoch:03d}/{args.epochs:03d}]') - print(f'Train Loss:{train_loss.avg:.3f} | MAE:{train_mae.avg:.3f}') - - return train_loss.avg, train_mae.avg - - def validate(self): - self.model.eval() - val_loss = AvgMeter() - val_mae = AvgMeter() - - with torch.no_grad(): - for images, masks, edges in tqdm(self.val_loader): - images = torch.tensor(images, device=self.device, dtype=torch.float32) - masks = torch.tensor(masks, device=self.device, dtype=torch.float32) - edges = torch.tensor(edges, device=self.device, dtype=torch.float32) - - outputs, edge_mask, ds_map = self.model(images) - loss1 = self.criterion(outputs, masks) - loss2 = self.criterion(ds_map[0], masks) - loss3 = self.criterion(ds_map[1], masks) - loss4 = self.criterion(ds_map[2], masks) - - loss_mask = self.criterion(edge_mask, edges) - loss = loss1 + loss2 + loss3 + loss4 + loss_mask - - # Metric - mae = torch.mean(torch.abs(outputs - masks)) - - # log - val_loss.update(loss.item(), n=images.size(0)) - val_mae.update(mae.item(), n=images.size(0)) - - print(f'Valid Loss:{val_loss.avg:.3f} | MAE:{val_mae.avg:.3f}') - return val_loss.avg, val_mae.avg - - def test(self, args, save_path): - path = os.path.join(save_path, 'best_model.pth') - self.model.load_state_dict(torch.load(path)) - print('###### pre-trained Model restored #####') - - te_img_folder = os.path.join(args.data_path, args.dataset, 'Test/images/') - te_gt_folder = os.path.join(args.data_path, args.dataset, 'Test/masks/') - test_loader = get_loader(te_img_folder, te_gt_folder, edge_folder=None, phase='test', - batch_size=args.batch_size, shuffle=False, - num_workers=args.num_workers, transform=self.test_transform) - - self.model.eval() - test_loss = AvgMeter() - test_mae = AvgMeter() - test_maxf = AvgMeter() - test_avgf = AvgMeter() - test_s_m = AvgMeter() - - Eval_tool = Evaluation_metrics(args.dataset, self.device) - - with torch.no_grad(): - for i, (images, masks, original_size, image_name) in enumerate(tqdm(test_loader)): - images = torch.tensor(images, device=self.device, dtype=torch.float32) - - outputs, edge_mask, ds_map = self.model(images) - H, W = original_size - - for i in range(images.size(0)): - mask = gt_to_tensor(masks[i]) - - h, w = H[i].item(), W[i].item() - - output = F.interpolate(outputs[i].unsqueeze(0), size=(h, w), mode='bilinear') - - loss = self.criterion(output, mask) - - # Metric - mae, max_f, avg_f, s_score = Eval_tool.cal_total_metrics(output, mask) - - # log - test_loss.update(loss.item(), n=1) - test_mae.update(mae, n=1) - test_maxf.update(max_f, n=1) - test_avgf.update(avg_f, n=1) - test_s_m.update(s_score, n=1) - - test_loss = test_loss.avg - test_mae = test_mae.avg - test_maxf = test_maxf.avg - test_avgf = test_avgf.avg - test_s_m = test_s_m.avg - - return test_loss, test_mae, test_maxf, test_avgf, test_s_m - - -class Tester(): - def __init__(self, args, save_path): - super(Tester, self).__init__() - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - self.test_transform = get_test_augmentation(img_size=args.img_size) - self.args = args - self.save_path = save_path - - # Network - self.model = 
TRACER(args).to(self.device) - if args.multi_gpu: - self.model = nn.DataParallel(self.model).to(self.device) - - path = os.path.join(save_path, 'best_model.pth') - self.model.load_state_dict(torch.load(path)) - print('###### pre-trained Model restored #####') - - self.criterion = Criterion(args) - - te_img_folder = os.path.join(args.data_path, args.dataset, 'Test/images/') - te_gt_folder = os.path.join(args.data_path, args.dataset, 'Test/masks/') - - self.test_loader = get_loader(te_img_folder, te_gt_folder, edge_folder=None, phase='test', - batch_size=args.batch_size, shuffle=False, - num_workers=args.num_workers, transform=self.test_transform) - - if args.save_map is not None: - os.makedirs(os.path.join('mask', 'exp'+str(self.args.exp_num), self.args.dataset), exist_ok=True) - - def test(self): - self.model.eval() - test_loss = AvgMeter() - test_mae = AvgMeter() - test_maxf = AvgMeter() - test_avgf = AvgMeter() - test_s_m = AvgMeter() - t = time.time() - - Eval_tool = Evaluation_metrics(self.args.dataset, self.device) - - with torch.no_grad(): - for i, (images, masks, original_size, image_name) in enumerate(tqdm(self.test_loader)): - images = torch.tensor(images, device=self.device, dtype=torch.float32) - - outputs, edge_mask, ds_map = self.model(images) - H, W = original_size - - for i in range(images.size(0)): - mask = gt_to_tensor(masks[i]) - h, w = H[i].item(), W[i].item() - - output = F.interpolate(outputs[i].unsqueeze(0), size=(h, w), mode='bilinear') - loss = self.criterion(output, mask) - - # Metric - mae, max_f, avg_f, s_score = Eval_tool.cal_total_metrics(output, mask) - - # Save prediction map - if self.args.save_map is not None: - output = (output.squeeze().detach().cpu().numpy()*255.0).astype(np.uint8) # convert uint8 type - cv2.imwrite(os.path.join('mask', 'exp'+str(self.args.exp_num), self.args.dataset, image_name[i]+'.png'), output) - - # log - test_loss.update(loss.item(), n=1) - test_mae.update(mae, n=1) - test_maxf.update(max_f, n=1) - test_avgf.update(avg_f, n=1) - test_s_m.update(s_score, n=1) - - test_loss = test_loss.avg - test_mae = test_mae.avg - test_maxf = test_maxf.avg - test_avgf = test_avgf.avg - test_s_m = test_s_m.avg - - print(f'Test Loss:{test_loss:.4f} | MAX_F:{test_maxf:.4f} | MAE:{test_mae:.4f} ' - f'| S_Measure:{test_s_m:.4f}, time: {time.time() - t:.3f}s') - - return test_loss, test_mae, test_maxf, test_avgf, test_s_m diff --git a/spaces/ennov8ion/semirealistic-models/index.html b/spaces/ennov8ion/semirealistic-models/index.html deleted file mode 100644 index 40b11abfac0f6f7c145d1d349a978f07587cf433..0000000000000000000000000000000000000000 --- a/spaces/ennov8ion/semirealistic-models/index.html +++ /dev/null @@ -1,305 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path - -models = [ - {"name": "Deliberate", "url": "Masagin/Deliberate"}, - {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "Dreamlike Diffusion", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Dreamlike Photoreal", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "Dreamshaper", "url": "Lykon/DreamShaper"}, - {"name": "Lyriel 1.3", "url": "sakistriker/Lyriel_V1.3"}, - {"name": "Never Ending Dream 2", "url": "luongphamit/NeverEnding-Dream2"}, - {"name": "Protogen X 5.8", "url": "darkstorm2150/Protogen_x5.8_Official_Release"}, - {"name": "❤ ART MODELS ==========", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Alice in Diffusion Land", "url": "Guizmus/SDArt_AliceInDiffusionLand"}, - {"name": 
"Alt Clip", "url": "BAAI/AltCLIP"}, - {"name": "Anything Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Chaos and Order", "url": "Guizmus/SDArt_ChaosAndOrder768"}, - {"name": "Chilloutclara", "url": "Fred99774/chilloutvlara"}, - {"name": "Comic Diffusion", "url": "ogkalu/Comic-Diffusion"}, - {"name": "Cosmic Horros 768", "url": "Guizmus/SDArt_cosmichorrors768"}, - {"name": "Cosmic Horros", "url": "Guizmus/SDArt_cosmichorrors"}, - {"name": "DGSpitzer", "url": "DGSpitzer/DGSpitzer-Art-Diffusion"}, - {"name": "Dungeons and Diffusion", "url": "0xJustin/Dungeons-and-Diffusion"}, - {"name": "Elden Ring", "url": "nitrosocke/elden-ring-diffusion"}, - {"name": "Epic Diffusion 1.1", "url": "johnslegers/epic-diffusion-v1.1"}, - {"name": "Epic Diffusion", "url": "johnslegers/epic-diffusion"}, - {"name": "EpicMix Realism", "url": "Duskfallcrew/EpicMix_Realism"}, - {"name": "Fantasy Mix", "url": "theintuitiveye/FantasyMix"}, - {"name": "Girl New 1", "url": "Fred99774/girlnew1"}, - {"name": "Lit 6B", "url": "hakurei/lit-6B"}, - {"name": "Luna Diffusion", "url": "proximasanfinetuning/luna-diffusion"}, - {"name": "Midjourney 4.0", "url": "flax/midjourney-v4-diffusion"}, - {"name": "Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Mo-Di Diffusion", "url": "nitrosocke/mo-di-diffusion"}, - {"name": "Nitro Diffusion", "url": "nitrosocke/Nitro-Diffusion"}, - {"name": "Openjourney V2", "url": "prompthero/openjourney-v2"}, - {"name": "Openjourney", "url": "prompthero/openjourney"}, - {"name": "Seek Art Mega", "url": "coreco/seek.art_MEGA"}, - {"name": "Something", "url": "Guizmus/SDArt_something"}, - {"name": "Spider Verse diffusion", "url": "nitrosocke/spider-verse-diffusion"}, - {"name": "Vintedois 1.0", "url": "22h/vintedois-diffusion-v0-1"}, - {"name": "Vintedois 2.0", "url": "22h/vintedois-diffusion-v0-2"}, - {"name": "❤ ART STYLES ==========", "url": "joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Balloon Art", "url": "Fictiverse/Stable_Diffusion_BalloonArt_Model"}, - {"name": "Double Exposure Diffusion", "url": "joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Fluid Art", "url": "Fictiverse/Stable_Diffusion_FluidArt_Model"}, - {"name": "GTA5 Artwork Diffusion", "url": "ItsJayQz/GTA5_Artwork_Diffusion"}, - {"name": "Marvel WhatIf Diffusion", "url": "ItsJayQz/Marvel_WhatIf_Diffusion"}, - {"name": "Naruto Diffuser", "url": "lambdalabs/sd-naruto-diffusers"}, - {"name": "Papercut", "url": "Fictiverse/Stable_Diffusion_PaperCut_Model"}, - {"name": "Pokemon Diffuser", "url": "lambdalabs/sd-pokemon-diffusers"}, - {"name": "Synthwave Punk 2", "url": "ItsJayQz/SynthwavePunk-v2"}, - {"name": "Valorant Diffusion", "url": "ItsJayQz/Valorant_Diffusion"}, - {"name": "Van Gogh Diffusion", "url": "dallinmackay/Van-Gogh-diffusion"}, - {"name": "Vectorartz Diffusion", "url": "coder119/Vectorartz_Diffusion"}, - {"name": "VoxelArt", "url": "Fictiverse/Stable_Diffusion_VoxelArt_Model"}, - {"name": "❤ ANIME MODELS ==========", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "7 Pa", "url": "AIARTCHAN/7pa"}, - {"name": "A Certain Model", "url": "JosephusCheung/ACertainModel"}, - {"name": "A Certain Thing", "url": "JosephusCheung/ACertainThing"}, - {"name": "A Certainity", "url": "JosephusCheung/ACertainty"}, - {"name": "Abyss Hell Hero", "url": "AIARTCHAN/AbyssHellHero"}, - {"name": "Abyss Maple 3", "url": "AIARTCHAN/AbyssMapleVer3"}, - {"name": "Abyss Orange Mix 2", "url": "WarriorMama777/AbyssOrangeMix2"}, - {"name": "Abyss Orange Mix 
4", "url": "sakistriker/AbyssOrangeMix3"}, - {"name": "Abyss Orange Mix", "url": "WarriorMama777/AbyssOrangeMix"}, - {"name": "AbyssHell 3", "url": "AIARTCHAN/AbyssHellVer3"}, - {"name": "All 526 Animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Anidosmix 3", "url": "AIARTCHAN/anidosmixV2"}, - {"name": "Anime Kawai Diffusion", "url": "Ojimi/anime-kawai-diffusion"}, - {"name": "Anireal 3D V2", "url": "circulus/sd-anireal-3d-v2"}, - {"name": "AnyLORA", "url": "kubanemil/AnyLORA"}, - {"name": "Anything 2.1", "url": "swl-models/anything-v2.1"}, - {"name": "Anything 3.0 Light", "url": "mm00/anything-v3.0-light"}, - {"name": "Anything 3.0", "url": "Linaqruf/anything-v3.0"}, - {"name": "Anything 3.1", "url": "cag/anything-v3-1"}, - {"name": "Anything 3X", "url": "iZELX1/Anything-V3-X"}, - {"name": "Anything 4.0", "url": "andite/anything-v4.0"}, - {"name": "Anything 5", "url": "sakistriker/Anything_V5_PrtRE"}, - {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"}, - {"name": "Anything Else 4", "url": "stablediffusionapi/anythingelse-v4"}, - {"name": "Anything Else 5", "url": "stablediffusionapi/anything-v5"}, - {"name": "Arcane Diffusion", "url": "nitrosocke/Arcane-Diffusion"}, - {"name": "Archer Diffusion", "url": "nitrosocke/archer-diffusion"}, - {"name": "Asian Mix", "url": "D1b4l4p/AsianMix"}, - {"name": "Blood Orange Mix", "url": "WarriorMama777/BloodOrangeMix"}, - {"name": "CamelliaMix 2.5D","url": "stablediffusionapi/camelliamix25d"}, - {"name": "CamelliaMix Line","url": "stablediffusionapi/camelliamixline"}, - {"name": "CamelliaMix","url": "Powidl43/CamelliaMix"}, - {"name": "Cetusmix", "url": "stablediffusionapi/cetusmix"}, - {"name": "Chik Mix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chikmix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chillout App Factory","url": "stablediffusionapi/chillout-app-factory"}, - {"name": "Classic Anime", "url": "nitrosocke/classic-anim-diffusion"}, - {"name": "Cool Japan Diffusion 2.1.2", "url": "aipicasso/cool-japan-diffusion-2-1-2"}, - {"name": "Cosmic Babes", "url": "stablediffusionapi/cosmic-babes"}, - {"name": "Counterfeit 1.0", "url": "gsdf/counterfeit-v1.0"}, - {"name": "Counterfeit 2", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"}, - {"name": "CuteSexyRobutts", "url": "andite/cutesexyrobutts-diffusion"}, - {"name": "CyberPunk Anime", "url": "DGSpitzer/Cyberpunk-Anime-Diffusion"}, - {"name": "Dark Sushi Mix", "url": "stablediffusionapi/dark-sushi-mix"}, - {"name": "Dash Sushi 25d", "url": "stablediffusionapi/dark-sushi-25d"}, - {"name": "DucHaiten Anime", "url": "DucHaiten/DucHaitenAnime"}, - {"name": "Eerie Orange Mix", "url": "WarriorMama777/EerieOrangeMix"}, - {"name": "Eimis Anime Diffusion", "url": "eimiss/EimisAnimeDiffusion_1.0v"}, - {"name": "Ghibli Diffusion", "url": "nitrosocke/Ghibli-Diffusion"}, - {"name": "GrapeFruit", "url": "iZELX1/Grapefruit"}, - {"name": "GuoFeng 3", "url": "xiaolxl/GuoFeng3"}, - {"name": "Guweiz Diffusion", "url": "andite/guweiz-diffusion"}, - {"name": "Hiten Diffusion", "url": "andite/hiten-diffusion"}, - {"name": "Icomix 2", "url": "stablediffusionapi/icomix-2"}, - {"name": "InkPunk Diffusion", "url": "Envvi/Inkpunk-Diffusion"}, - {"name": "Mama Orange Mixs", "url": "WarriorMama777/OrangeMixs"}, - {"name": "Mashuu Diffusion", "url": "andite/mashuu-diffusion"}, - {"name": "Meainamis 8", "url": "sakistriker/MeinaMix_V8"}, - {"name": "Meina 
Alter", "url": "stablediffusionapi/meinaalter"}, - {"name": "Meina Pastel", "url": "stablediffusionapi/meinapastel"}, - {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"}, - {"name": "Mignon Diffusion", "url": "andite/mignon-diffusion"}, - {"name": "MikaPikazo Diffusion", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mikapikazo", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mix Pro V4", "url": "AIARTCHAN/MIX-Pro-V4"}, - {"name": "NeverEnding-Dream", "url": "Lykon/NeverEnding-Dream"}, - {"name": "Niji V5 Style 1", "url": "sakistriker/NijiV5style_V1"}, - {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"}, - {"name": "OpenNiji", "url": "Korakoe/OpenNiji"}, - {"name": "Pastel Mix", "url": "andite/pastel-mix"}, - {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"}, - {"name": "Piromizu Diffusion", "url": "andite/piromizu-diffusion"}, - {"name": "Protogen 2.2", "url": "darkstorm2150/Protogen_v2.2_Official_Release"}, - {"name": "Protogen Infinity", "url": "darkstorm2150/Protogen_Infinity_Official_Release"}, - {"name": "Protogen X 3.4", "url": "darkstorm2150/Protogen_x3.4_Official_Release"}, - {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"}, - {"name": "Rev Animated", "url": "coreml/coreml-ReV-Animated"}, - {"name": "Rev Animated", "url": "LottePeisch/RevAnimated-Diffusers"}, - {"name": "Something V 2.2","url": "NoCrypt/SomethingV2_2"}, - {"name": "Something V2","url": "NoCrypt/SomethingV2"}, - {"name": "Three Delicacy", "url": "stablediffusionapi/three-delicacy"}, - {"name": "Three Delicacy wonto", "url": "stablediffusionapi/three-delicacy-wonto"}, - {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"}, - {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"}, - {"name": "❤ REALISTIC PHOTO MODELS ==========", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "AmiIReal", "url": "stablediffusionapi/amireal"}, - {"name": "Analog Diffusion", "url": "wavymulder/Analog-Diffusion"}, - {"name": "Circulus 2.8", "url": "circulus/sd-photoreal-v2.8"}, - {"name": "Circulus Photoreal V2", "url": "circulus/sd-photoreal-real-v2"}, - {"name": "Claudfuen 1", "url": "claudfuen/photorealistic-fuen-v1"}, - {"name": "Collage Diffusion", "url": "wavymulder/collage-diffusion"}, - {"name": "Cyberrealistic", "url": "stablediffusionapi/cyberrealistic"}, - {"name": "Dreamful 2", "url": "Hius/DreamFul-V2"}, - {"name": "GakkiMix768", "url": "Sa1i/gakki-mix-768"}, - {"name": "Grimoeresigils", "url": "ECarbenia/grimoiresigils"}, - {"name": "HARDBlend", "url": "theintuitiveye/HARDblend"}, - {"name": "HassanBlend 1.4", "url": "hassanblend/hassanblend1.4"}, - {"name": "HassanBlend 1.5.1.2", "url": "hassanblend/HassanBlend1.5.1.2"}, - {"name": "Lomo Diffusion", "url": "wavymulder/lomo-diffusion"}, - {"name": "Model Shoot", "url": "wavymulder/modelshoot"}, - {"name": "Portrait Plus", "url": "wavymulder/portraitplus"}, - {"name": "QuinceMix", "url": "Hemlok/QuinceMix"}, - {"name": "Realistic Vision 1.4", "url": "SG161222/Realistic_Vision_V1.4"}, - {"name": "The Ally", "url": "stablediffusionapi/the-ally"}, - {"name": "Timeless Diffusion", "url": "wavymulder/timeless-diffusion"}, - {"name": "UltraSkin", "url": "VegaKH/Ultraskin"}, - {"name": "Wavyfusion", "url": "wavymulder/wavyfusion"}, - {"name": "❤ SEMI-REALISTIC MODELS ==========", "url": "stablediffusionapi/all-526"}, - {"name": "All 526", "url": "stablediffusionapi/all-526"}, - {"name": "All 526 animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Circulus Semi Real 2", 
"url": "circulus/sd-photoreal-semi-v2"}, - {"name": "Semi Real Mix", "url": "robotjung/SemiRealMix"}, - {"name": "SpyBG", "url": "stablediffusionapi/spybg"}, - {"name": "❤ STABLE DIFFUSION MODELS ==========", "url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 1.4","url": "CompVis/stable-diffusion-v1-4"}, - {"name": "Stable Diffusion 1.5","url": "runwayml/stable-diffusion-v1-5"}, - {"name": "Stable Diffusion 2.1","url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 2.1 Base","url": "stabilityai/stable-diffusion-2-1-base"}, - {"name": "Stable Diffusion 2.1 Unclip","url": "stabilityai/stable-diffusion-2-1-unclip"}, - {"name": "❤ SCI FI MODELS ==========", "url": "nitrosocke/Future-Diffusion"}, - {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"}, - {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"}, - {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"}, - {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"}, - {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"}, - {"name": "❤ 3D ART MODELS ==========", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten Art", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten ClassicAnime", "url": "DucHaiten/DH_ClassicAnime"}, - {"name": "DucHaiten DreamWorld", "url": "DucHaiten/DucHaitenDreamWorld"}, - {"name": "DucHaiten Journey", "url": "DucHaiten/DucHaitenJourney"}, - {"name": "DucHaiten StyleLikeMe", "url": "DucHaiten/DucHaiten-StyleLikeMe"}, - {"name": "DucHaiten SuperCute", "url": "DucHaiten/DucHaitenSuperCute"}, - {"name": "Redshift Diffusion 768", "url": "nitrosocke/redshift-diffusion-768"}, - {"name": "Redshift Diffusion", "url": "nitrosocke/redshift-diffusion"}, -] - -current_model = models[0] - -text_gen = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link") - -models2 = [] -for model in models: - model_url = f"models/{model['url']}" - loaded_model = gr.Interface.load(model_url, live=True, preprocess=True) - models2.append(loaded_model) - - -def text_it(inputs, text_gen=text_gen): - return text_gen(inputs) - - -def set_model(current_model_index): - global current_model - current_model = models[current_model_index] - return gr.update(label=f"{current_model['name']}") - - -def send_it(inputs, model_choice): - proc = models2[model_choice] - return proc(inputs) - - -css = """""" - -with gr.Blocks(css=css) as myface: - gr.HTML( - """ - - - - - - - - - - - - - - - -""" - ) - - with gr.Row(): - with gr.Row(): - input_text = gr.Textbox(label="Prompt idea", lines=1) - # Model selection dropdown - model_name1 = gr.Dropdown( - label="Choose Model", - choices=[m["name"] for m in models], - type="index", - value=current_model["name"], - interactive=True, - ) - with gr.Row(): - see_prompts = gr.Button("Generate Prompts") - run = gr.Button("Generate Images", variant="primary") - with gr.Tab("Main"): - with gr.Row(): - output1 = gr.Image(label=f"{current_model['name']}") - output2 = gr.Image(label=f"{current_model['name']}") - output3 = gr.Image(label=f"{current_model['name']}") - output4 = gr.Image(label=f"{current_model['name']}") - with gr.Row(): - magic1 = gr.Textbox(lines=4) - magic2 = gr.Textbox(lines=4) - magic3 = gr.Textbox(lines=4) - magic4 = gr.Textbox(lines=4) - - with gr.Row(): - output5 = gr.Image(label=f"{current_model['name']}") - output6 = gr.Image(label=f"{current_model['name']}") - output7 = gr.Image(label=f"{current_model['name']}") - output8 = 
gr.Image(label=f"{current_model['name']}") - with gr.Row(): - magic5 = gr.Textbox(lines=4) - magic6 = gr.Textbox(lines=4) - magic7 = gr.Textbox(lines=4) - magic8 = gr.Textbox(lines=4) - - model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6, output7, output8]) - - run.click(send_it, inputs=[magic1, model_name1], outputs=[output1]) - run.click(send_it, inputs=[magic2, model_name1], outputs=[output2]) - run.click(send_it, inputs=[magic3, model_name1], outputs=[output3]) - run.click(send_it, inputs=[magic4, model_name1], outputs=[output4]) - run.click(send_it, inputs=[magic5, model_name1], outputs=[output5]) - run.click(send_it, inputs=[magic6, model_name1], outputs=[output6]) - run.click(send_it, inputs=[magic7, model_name1], outputs=[output7]) - run.click(send_it, inputs=[magic8, model_name1], outputs=[output8]) - - see_prompts.click(text_it, inputs=[input_text], outputs=[magic1]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic2]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic3]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic4]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic5]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic6]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic7]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic8]) - -myface.queue(concurrency_count=200) -myface.launch(inline=True, show_api=False, max_threads=400) \ No newline at end of file diff --git a/spaces/evaluate-metric/mean_iou/mean_iou.py b/spaces/evaluate-metric/mean_iou/mean_iou.py deleted file mode 100644 index 421a261f41d4ddb5782ed005e0405560494cc378..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/mean_iou/mean_iou.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright 2022 The HuggingFace Evaluate Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Mean IoU (Intersection-over-Union) metric.""" - -from typing import Dict, Optional - -import datasets -import numpy as np - -import evaluate - - -_DESCRIPTION = """ -IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union -between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, -the mean IoU of the image is calculated by taking the IoU of each class and averaging them. -""" - -_KWARGS_DESCRIPTION = """ -Args: - predictions (`List[ndarray]`): - List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. - references (`List[ndarray]`): - List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. - num_labels (`int`): - Number of classes (categories). - ignore_index (`int`): - Index that will be ignored during evaluation. - nan_to_num (`int`, *optional*): - If specified, NaN values will be replaced by the number defined by the user. 
- label_map (`dict`, *optional*): - If specified, dictionary mapping old label indices to new label indices. - reduce_labels (`bool`, *optional*, defaults to `False`): - Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, - and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. - -Returns: - `Dict[str, float | ndarray]` comprising various elements: - - *mean_iou* (`float`): - Mean Intersection-over-Union (IoU averaged over all categories). - - *mean_accuracy* (`float`): - Mean accuracy (averaged over all categories). - - *overall_accuracy* (`float`): - Overall accuracy on all images. - - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): - Per category accuracy. - - *per_category_iou* (`ndarray` of shape `(num_labels,)`): - Per category IoU. - -Examples: - - >>> import numpy as np - - >>> mean_iou = evaluate.load("mean_iou") - - >>> # suppose one has 3 different segmentation maps predicted - >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) - >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) - - >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) - >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) - - >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) - >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) - - >>> predicted = [predicted_1, predicted_2, predicted_3] - >>> ground_truth = [actual_1, actual_2, actual_3] - - >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) - >>> print(results) # doctest: +NORMALIZE_WHITESPACE - {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])} -""" - -_CITATION = """\ -@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, -author = {{MMSegmentation Contributors}}, -license = {Apache-2.0}, -month = {7}, -title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, -url = {https://github.com/open-mmlab/mmsegmentation}, -year = {2020} -}""" - - -def intersect_and_union( - pred_label, - label, - num_labels, - ignore_index: bool, - label_map: Optional[Dict[int, int]] = None, - reduce_labels: bool = False, -): - """Calculate intersection and Union. - - Args: - pred_label (`ndarray`): - Prediction segmentation map of shape (height, width). - label (`ndarray`): - Ground truth segmentation map of shape (height, width). - num_labels (`int`): - Number of categories. - ignore_index (`int`): - Index that will be ignored during evaluation. - label_map (`dict`, *optional*): - Mapping old labels to new labels. The parameter will work only when label is str. - reduce_labels (`bool`, *optional*, defaults to `False`): - Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, - and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. - - Returns: - area_intersect (`ndarray`): - The intersection of prediction and ground truth histogram on all classes. - area_union (`ndarray`): - The union of prediction and ground truth histogram on all classes. - area_pred_label (`ndarray`): - The prediction histogram on all classes. 
- area_label (`ndarray`): - The ground truth histogram on all classes. - """ - if label_map is not None: - for old_id, new_id in label_map.items(): - label[label == old_id] = new_id - - # turn into Numpy arrays - pred_label = np.array(pred_label) - label = np.array(label) - - if reduce_labels: - label[label == 0] = 255 - label = label - 1 - label[label == 254] = 255 - - mask = label != ignore_index - mask = np.not_equal(label, ignore_index) - pred_label = pred_label[mask] - label = np.array(label)[mask] - - intersect = pred_label[pred_label == label] - - area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0] - area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0] - area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0] - - area_union = area_pred_label + area_label - area_intersect - - return area_intersect, area_union, area_pred_label, area_label - - -def total_intersect_and_union( - results, - gt_seg_maps, - num_labels, - ignore_index: bool, - label_map: Optional[Dict[int, int]] = None, - reduce_labels: bool = False, -): - """Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair. - - Args: - results (`ndarray`): - List of prediction segmentation maps, each of shape (height, width). - gt_seg_maps (`ndarray`): - List of ground truth segmentation maps, each of shape (height, width). - num_labels (`int`): - Number of categories. - ignore_index (`int`): - Index that will be ignored during evaluation. - label_map (`dict`, *optional*): - Mapping old labels to new labels. The parameter will work only when label is str. - reduce_labels (`bool`, *optional*, defaults to `False`): - Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, - and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. - - Returns: - total_area_intersect (`ndarray`): - The intersection of prediction and ground truth histogram on all classes. - total_area_union (`ndarray`): - The union of prediction and ground truth histogram on all classes. - total_area_pred_label (`ndarray`): - The prediction histogram on all classes. - total_area_label (`ndarray`): - The ground truth histogram on all classes. - """ - total_area_intersect = np.zeros((num_labels,), dtype=np.float64) - total_area_union = np.zeros((num_labels,), dtype=np.float64) - total_area_pred_label = np.zeros((num_labels,), dtype=np.float64) - total_area_label = np.zeros((num_labels,), dtype=np.float64) - for result, gt_seg_map in zip(results, gt_seg_maps): - area_intersect, area_union, area_pred_label, area_label = intersect_and_union( - result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels - ) - total_area_intersect += area_intersect - total_area_union += area_union - total_area_pred_label += area_pred_label - total_area_label += area_label - return total_area_intersect, total_area_union, total_area_pred_label, total_area_label - - -def mean_iou( - results, - gt_seg_maps, - num_labels, - ignore_index: bool, - nan_to_num: Optional[int] = None, - label_map: Optional[Dict[int, int]] = None, - reduce_labels: bool = False, -): - """Calculate Mean Intersection and Union (mIoU). - - Args: - results (`ndarray`): - List of prediction segmentation maps, each of shape (height, width). 
- gt_seg_maps (`ndarray`): - List of ground truth segmentation maps, each of shape (height, width). - num_labels (`int`): - Number of categories. - ignore_index (`int`): - Index that will be ignored during evaluation. - nan_to_num (`int`, *optional*): - If specified, NaN values will be replaced by the number defined by the user. - label_map (`dict`, *optional*): - Mapping old labels to new labels. The parameter will work only when label is str. - reduce_labels (`bool`, *optional*, defaults to `False`): - Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, - and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. - - Returns: - `Dict[str, float | ndarray]` comprising various elements: - - *mean_iou* (`float`): - Mean Intersection-over-Union (IoU averaged over all categories). - - *mean_accuracy* (`float`): - Mean accuracy (averaged over all categories). - - *overall_accuracy* (`float`): - Overall accuracy on all images. - - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): - Per category accuracy. - - *per_category_iou* (`ndarray` of shape `(num_labels,)`): - Per category IoU. - """ - total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union( - results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels - ) - - # compute metrics - metrics = dict() - - all_acc = total_area_intersect.sum() / total_area_label.sum() - iou = total_area_intersect / total_area_union - acc = total_area_intersect / total_area_label - - metrics["mean_iou"] = np.nanmean(iou) - metrics["mean_accuracy"] = np.nanmean(acc) - metrics["overall_accuracy"] = all_acc - metrics["per_category_iou"] = iou - metrics["per_category_accuracy"] = acc - - if nan_to_num is not None: - metrics = dict( - {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()} - ) - - return metrics - - -@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) -class MeanIoU(evaluate.Metric): - def _info(self): - return evaluate.MetricInfo( - description=_DESCRIPTION, - citation=_CITATION, - inputs_description=_KWARGS_DESCRIPTION, - features=datasets.Features( - # 1st Seq - height dim, 2nd - width dim - { - "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))), - "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))), - } - ), - reference_urls=[ - "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" - ], - ) - - def _compute( - self, - predictions, - references, - num_labels: int, - ignore_index: bool, - nan_to_num: Optional[int] = None, - label_map: Optional[Dict[int, int]] = None, - reduce_labels: bool = False, - ): - iou_result = mean_iou( - results=predictions, - gt_seg_maps=references, - num_labels=num_labels, - ignore_index=ignore_index, - nan_to_num=nan_to_num, - label_map=label_map, - reduce_labels=reduce_labels, - ) - return iou_result diff --git a/spaces/fatiXbelha/sd/Death City Zombie Invasion Mod - Blast Zombies with Awesome Weapons and Skills.md b/spaces/fatiXbelha/sd/Death City Zombie Invasion Mod - Blast Zombies with Awesome Weapons and Skills.md deleted file mode 100644 index 3a5440a4ce40e69a127eba37708c2f225610d1da..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Death City Zombie Invasion Mod - Blast Zombies with 
Awesome Weapons and Skills.md +++ /dev/null @@ -1,101 +0,0 @@ -
      -

      Death City: Zombie Invasion Mod APK - A Thrilling Survival Game

      -

      If you are a fan of zombie shooting games, you will love Death City: Zombie Invasion. This is a thrilling survival game where you have to fight your way through hordes of zombies in different locations. You can download the mod apk version of this game to enjoy unlimited money, ammo, and other features. In this article, we will tell you more about this game and how to download and install it on your Android device.

      -

      Introduction

      -

      What is Death City: Zombie Invasion?

      -

      Death City: Zombie Invasion is a first-person shooter game developed by Charm Tech. The game is set in a post-apocalyptic world where a virus has turned most people into zombies. You are one of the few survivors who have to fight for your life and find a way to escape the city. You can explore various atmospheric locations in 3D, such as rainforests, cities, ruins, highways, and more. You will encounter different types of zombies, each with their own strengths and weaknesses. You will also have to face boss battles and complete challenging missions.

      -

      death city zombie invasion mod apk an1


      Download Zip »»» https://urllie.com/2uNBGA



      -

      Why download the mod apk version?

      -

      The mod apk version of Death City: Zombie Invasion is a modified version of the original game that gives you access to unlimited resources and features. With the mod apk version, you can enjoy:

      -
        -
      • Unlimited money to buy weapons, upgrades, and items
      • Unlimited ammo to shoot without running out of bullets
      • No ads to interrupt your gameplay
      • No root required to install the game
      -

      The mod apk version will make your gaming experience more fun and exciting. You can download it for free from a trusted source.

      -

      Features of Death City: Zombie Invasion Mod APK

      -

      Stunning 3D graphics and realistic sound effects

      -

      One of the best things about Death City: Zombie Invasion is its stunning 3D graphics and realistic sound effects. The game has a dark and gloomy atmosphere that creates a sense of horror and suspense. The zombies look terrifying and gruesome, and their movements are smooth and realistic. The sound effects are also immersive and enhance the gameplay. You can hear the zombies' growls, screams, and moans, as well as the gunshots, explosions, and background music.

      -

      Various weapons and upgrades to choose from

      -

      Another great feature of Death City: Zombie Invasion is its variety of weapons and upgrades to choose from. You can use different types of guns, such as pistols, rifles, shotguns, sniper rifles, machine guns, and rocket launchers. You can upgrade your weapons with scopes, silencers, magazines, barrels, and stocks, and you can also use grenades, mines, turrets, drones, and other items to help you in your battles. You can customize your weapons according to your preferences and play style.

      -

      Challenging missions and levels to complete

      -

      Death City: Zombie Invasion also has challenging missions and levels to complete. The game has over 100 levels with different objectives and difficulties: kill a set number of zombies, survive for a set amount of time, protect a given area, or escape from a location. The game also has boss battles where you have to face powerful zombies that are hard to kill. The game will test your skills and strategy as you progress through the levels.

      -

      death city zombie invasion mod apk unlimited money
      -death city zombie invasion mod apk latest version
      -death city zombie invasion mod apk download for android
      -death city zombie invasion mod apk offline
      -death city zombie invasion mod apk free shopping
      -death city zombie invasion mod apk rexdl
      -death city zombie invasion mod apk revdl
      -death city zombie invasion mod apk happymod
      -death city zombie invasion mod apk android 1
      -death city zombie invasion mod apk no ads
      -death city zombie invasion hack mod apk
      -death city zombie invasion cheat mod apk
      -death city zombie invasion premium mod apk
      -death city zombie invasion pro mod apk
      -death city zombie invasion vip mod apk
      -death city zombie invasion mega mod apk
      -death city zombie invasion god mode apk
      -death city zombie invasion unlimited ammo apk
      -death city zombie invasion unlocked weapons apk
      -death city zombie invasion unlimited dna apk
      -download game death city zombie invasion mod apk
      -download death city zombie invasion hack apk
      -download death city zombie invasion cheat apk
      -download death city zombie invasion cracked apk
      -download death city zombie invasion full version apk
      -how to install death city zombie invasion mod apk
      -how to play death city zombie invasion mod apk
      -how to update death city zombie invasion mod apk
      -how to download death city zombie invasion mod apk on pc
      -how to download death city zombie invasion mod apk on ios
      -best weapons in death city zombie invasion mod apk
      -best characters in death city zombie invasion mod apk
      -best skills in death city zombie invasion mod apk
      -best tips for death city zombie invasion mod apk
      -best tricks for death city zombie invasion mod apk
      -best cheats for death city zombie invasion mod apk
      -best hacks for death city zombie invasion mod apk
      -gameplay of death city zombie invasion mod apk
      -review of death city zombie invasion mod apk
      -rating of death city zombie invasion mod apk
      -features of death city zombie invasion mod apk
      -benefits of death city zombie invasion mod apk
      -advantages of death city zombie invasion mod apk
      -disadvantages of death city zombie invasion mod apk
      -problems with death city zombie invasion mod apk
      -solutions for death city zombie invasion mod apk
      -alternatives to death city zombie invasion mod apk
      -similar games to death city zombie invasion mod apk
      -updates for death city zombie invasion mod apk

      -

      Collect zombie DNA and unlock new abilities

      -

      Another cool feature of Death City: Zombie Invasion is its zombie DNA system. You can collect zombie DNA from the zombies you kill and use it to unlock new abilities. You can enhance your attributes, such as health, speed, damage, and critical rate. You can also unlock special skills, such as bullet time, berserk mode, and explosive bullets. These abilities will give you an edge in your battles and make you more powerful.

      -

      How to download and install Death City: Zombie Invasion Mod APK

      -

      Step 1: Download the mod apk file from a trusted source

      -

      The first step to download and install Death City: Zombie Invasion Mod APK is to find a trusted source that provides the mod apk file. You can search online for websites that offer the mod apk file for free. Make sure to check the reviews and ratings of the website before downloading the file. You can also scan the file with an antivirus program to ensure that it is safe and virus-free.
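      If the download page publishes a SHA-256 checksum for the file (many do not), you can also compare it against the file you actually received. Below is a minimal Python sketch of that check; the file name and expected hash are placeholders you would replace with your own values.

```python
# Minimal integrity check for a downloaded APK (file name and hash are placeholders).
import hashlib

APK_PATH = "death-city-zombie-invasion.apk"                    # assumption: your downloaded file
EXPECTED_SHA256 = "paste-the-hash-from-the-download-page-here"  # assumption

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so large APKs are not loaded fully into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    print("SHA-256:", actual)
    print("Matches published hash:", actual == EXPECTED_SHA256.lower())
```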

      -

      Step 2: Enable unknown sources on your device settings

      -

      The second step to download and install Death City: Zombie Invasion Mod APK is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings and look for security or privacy options. Then, find the option that says unknown sources or allow installation of apps from unknown sources. Toggle it on and confirm your choice.

      -

      Step 3: Install the mod apk file and launch the game

      -

      The third and final step to download and install Death City: Zombie Invasion Mod APK is to install the mod apk file and launch the game. To do this, locate the mod apk file that you downloaded in your device storage and tap on it. Follow the instructions on the screen to install the game. Once the installation is done, you can launch the game from your app drawer or home screen. Enjoy playing Death City: Zombie Invasion Mod APK with unlimited money, ammo, and other features.
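      As an optional alternative to tapping the file on the phone, you can sideload the APK from a computer with adb. This is only a sketch: it assumes the Android SDK platform-tools (adb) are installed, USB debugging is enabled on the device, and the file name is a placeholder.

```python
# Optional sideload from a computer using adb (not the on-device method described above).
import subprocess

APK_PATH = "death-city-zombie-invasion.apk"  # assumption: path to your downloaded APK

def adb_install(apk_path: str) -> None:
    # `-r` keeps existing app data if an older version is already installed.
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True, text=True, check=False,
    )
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    adb_install(APK_PATH)
```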

      -

      Conclusion

      -

      Summary of the main points and benefits of the game

      -

Death City: Zombie Invasion Mod APK is a thrilling survival game where you have to fight your way through hordes of zombies in different locations. The game has stunning 3D graphics and realistic sound effects that create a sense of horror and suspense. The game also has various weapons and upgrades to choose from, challenging missions and levels to complete, and a zombie DNA system that lets you unlock new abilities. The mod apk version of the game gives you access to unlimited money, ammo, and other features that make your gaming experience more fun and exciting.

      -

      Call to action and invitation to leave feedback

      -

      If you are looking for a zombie shooting game that will keep you on the edge of your seat, you should download Death City: Zombie Invasion Mod APK today. You can download it for free from a trusted source and install it on your Android device easily. You will love playing this game with its amazing features and mod apk benefits. Don't forget to share your feedback with us in the comments section below. We would love to hear from you.

      -

      Frequently Asked Questions

      -
        -
      • Q: Is Death City: Zombie Invasion Mod APK safe to download and install?
      • -
      • A: Yes, Death City: Zombie Invasion Mod APK is safe to download and install as long as you get it from a trusted source. You can also scan the file with an antivirus program before installing it.
      • -
      • Q: What are the minimum requirements to play Death City: Zombie Invasion Mod APK?
      • -
      • A: The minimum requirements to play Death City: Zombie Invasion Mod APK are Android 4.4 or higher, 2 GB of RAM, and 300 MB of free storage space.
      • -
      • Q: How can I update Death City: Zombie Invasion Mod APK?
      • -
      • A: To update Death City: Zombie Invasion Mod APK, you have to download the latest version of the mod apk file from a trusted source and install it over the existing one. You don't have to uninstall the previous version.
      • -
      • Q: Can I play Death City: Zombie Invasion Mod APK offline?
      • -
      • A: Yes, you can play Death City: Zombie Invasion Mod APK offline without an internet connection. However, some features may not work properly without an internet connection.
      • -
      • Q: Can I play Death City: Zombie Invasion Mod APK with my friends?
      • -
      • A: Yes, you can play Death City: Zombie Invasion Mod APK with your friends online. The game has a multiplayer mode where you can team up with other players and fight against zombies together. You can also chat with your friends and share your strategies and tips.
      • -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download BombSquad for PC - Free Action Game with Explosive Fun.md b/spaces/fatiXbelha/sd/Download BombSquad for PC - Free Action Game with Explosive Fun.md deleted file mode 100644 index 4c91f243b07c81462724a4ba715a2845e4d2f651..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download BombSquad for PC - Free Action Game with Explosive Fun.md +++ /dev/null @@ -1,124 +0,0 @@ -
      -

      Download BombSquad for PC Latest Version

      -

      Do you love blowing up your friends in mini-games ranging from capture-the-flag to hockey? Do you enjoy gratuitous explosions, advanced ragdoll face-plant physics, pirates, ninjas, barbarians, insane chefs, and more? If you answered yes to any of these questions, then you should definitely try BombSquad, an action-packed multiplayer party game by Eric Froemling. And the best part is, you can play it on your PC with the help of an emulator.

      -

      In this article, we will show you what BombSquad is, how to download and install it on your PC, and some tips and tricks to improve your gameplay. Let's get started!

      -

      download bombsquad for pc latest version


      Download File 🌟 https://urllie.com/2uNFgW



      -

      What is BombSquad?

      -

      BombSquad is an Android game that lets you blow up your friends in various mini-games that test your skills, reflexes, and strategy. You can play with up to 8 players locally or online, using touch screens, controllers, or even phones and tablets as controllers via the free 'BombSquad Remote' app. You can also customize your character, choose from different game modes, and create your own levels with the built-in level editor.

      -

      Features and gameplay of BombSquad

      -

      BombSquad has many features that make it fun and addictive to play. Here are some of them:

      -
        -
      • Multiple game modes: You can choose from different game modes such as Deathmatch, Capture the Flag, King of the Hill, Bomber Hockey, Epic Slow Motion Elimination, and more. Each mode has its own rules and objectives that require different strategies and tactics.
      • -
      • Various characters and power-ups: You can unlock over 15 characters to play as, each with their own appearance and personality. You can also use different power-ups to enhance your abilities or hinder your opponents. Some of the power-ups include ice bombs, sticky bombs, land mines, boxing gloves, shields, health packs, and more.
      • -
      • Stylized graphics and physics: BombSquad has a cartoon-like graphics style that is colorful and vibrant. The game also features advanced ragdoll face-plant physics that make the characters react realistically to explosions, falls, punches, and other actions. You can see them fly across the screen, bounce off walls, or collapse on the ground.
      • -
      -

      System requirements and compatibility of BombSquad

      -

      BombSquad is compatible with Android devices running Android 4.4 or higher. However, if you want to play it on your PC, you will need an emulator that can run Android apps on your computer. An emulator is a software that mimics the functions of another device or system. In this case, it will allow you to run BombSquad on your PC as if it were an Android device.

      -

      There are many emulators available for PC, but some of the most popular ones are BlueStacks, MEmu Player, MuMu Player, and BrowserCam. Each emulator has its own advantages and disadvantages, so you should choose the one that suits your preferences and needs. However, regardless of which emulator you choose, you will need a PC that meets these minimum system requirements:

      -

      download bombsquad for windows pc free
      -bombsquad game for pc latest version
      -how to install bombsquad on windows 10/11
      -bombsquad pc download with bluestacks emulator
      -play bombsquad online on pc with friends
      -bombsquad for mac free download
      -bombsquad pc game features and description
      -bombsquad windows pc download free - net.froemling.bombsquad
      -get bombsquad for pc and mac - pcmacstore.com
      -download and play bombsquad on pc and mac - bluestacks.com
      -bombsquad action game for pc and mac
      -bombsquad pc game download latest version 1.7.19
      -bombsquad for windows 7/8/10/11 pc and laptop
      -bombsquad pc game alternatives for windows and mac
      -bombsquad pc game installation requirements and FAQs
      -download bombsquad apk/xapk file for pc
      -bombsquad pc game pros and cons
      -bombsquad mini-games for pc and mac
      -bombsquad pc game screenshots and preview
      -bombsquad pc game licence and file size
      -download bombsquad mod apk for pc
      -bombsquad pc game changelog and updates
      -bombsquad pc game ratings and reviews
      -bombsquad pc game developer and publisher
      -bombsquad pc game compatibility and compactibility list
      -download bombsquad for windows 11 with noxplayer emulator
      -play bombsquad offline on pc and mac
      -bombsquad for pc and mac download server 1/server 2/server 3
      -how to uninstall bombsquad from pc and mac
      -how to update bombsquad on pc and mac
      -how to fix bombsquad not working on pc and mac
      -how to play bombsquad with gamepad or iphone on pc and mac
      -how to customize bombsquad settings on pc and mac
      -how to unlock all characters in bombsquad on pc and mac
      -how to join or create a server in bombsquad on pc and mac
      -how to use cheats in bombsquad on pc and mac
      -how to get free tickets in bombsquad on pc and mac
      -how to record or stream bombsquad gameplay on pc and mac
      -how to contact bombsquad support team on pc and mac
      -how to share or transfer bombsquad data on pc and mac
      -how to backup or restore bombsquad data on pc and mac
      -how to sync or link bombsquad account on pc and mac
      -how to invite or add friends in bombsquad on pc and mac
      -how to chat or communicate in bombsquad on pc and mac
      -how to report or block players in bombsquad on pc and mac
      -how to earn or redeem rewards in bombsquad on pc and mac
      -how to access or manage your profile in bombsquad on pc and mac
      -how to change your name or avatar in bombsquad on pc and mac
      -how to view or edit your controls in bombsquad on pc and mac

      -
        -
      • CPU: 2.0 GHz or higher
      • -
      • RAM: 2 GB or more
      • -
      • Video card: 512 MB or more
      • Storage: 5 GB or more
      • -
      • Operating system: Windows 7 or higher, Mac OS X 10.9 or higher
      • -
      -

      If your PC meets these requirements, you can proceed to the next step of downloading and installing BombSquad on your PC.
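      If you want to double-check the requirements above before installing an emulator, the short Python sketch below reports your CPU count, RAM, and free disk space. It assumes the third-party psutil package is installed (pip install psutil); the thresholds mirror the minimums listed here.

```python
# Quick check of the emulator minimum requirements listed above.
# Assumes the third-party `psutil` package: pip install psutil
import os
import shutil
import psutil

MIN_RAM_GB = 2
MIN_FREE_DISK_GB = 5

ram_gb = psutil.virtual_memory().total / (1024 ** 3)
free_gb = shutil.disk_usage(os.path.expanduser("~")).free / (1024 ** 3)

print(f"Logical CPUs : {os.cpu_count()}")
print(f"RAM          : {ram_gb:.1f} GB (need >= {MIN_RAM_GB} GB)")
print(f"Free disk    : {free_gb:.1f} GB (need >= {MIN_FREE_DISK_GB} GB)")
print("Meets the listed minimums:", ram_gb >= MIN_RAM_GB and free_gb >= MIN_FREE_DISK_GB)
```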

      -

      How to download and install BombSquad on PC

      -

      Downloading and installing BombSquad on your PC is easy and simple. Just follow these steps:

      -

      Step 1: Download an emulator

      -

      The first step is to download an emulator of your choice from its official website. For example, if you want to use BlueStacks, you can go to https://www.bluestacks.com/ and click on the "Download BlueStacks" button. The download will start automatically and you will get a .exe file on your PC.

      -

      Step 2: Install the emulator and sign in to Google Play

      -

      The next step is to install the emulator on your PC by double-clicking on the .exe file and following the instructions on the screen. Once the installation is complete, launch the emulator and sign in to your Google account. This will allow you to access the Google Play Store and download apps from it.

      -

      Step 3: Search for BombSquad in the app center

      -

      The third step is to search for BombSquad in the app center of the emulator. You can use the search bar or browse through the categories to find it. Alternatively, you can also use this link https://play.google.com/store/apps/details?id=net.froemling.bombsquad to go directly to the BombSquad page on Google Play.

      -

      Step 4: Install and launch BombSquad on PC

      -

      The final step is to install and launch BombSquad on your PC. Just click on the "Install" button on the BombSquad page and wait for the download and installation to finish. Once it is done, you can click on the "Open" button or find the BombSquad icon on your desktop or emulator home screen. Congratulations, you have successfully installed BombSquad on your PC!
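      If you prefer the command line, you can also launch the game in the emulator through adb. This is a sketch only: it assumes adb is installed and the emulator's ADB bridge is enabled, and the port number is an assumption that differs between emulators (check your emulator's settings). The package name comes from the Play Store link above.

```python
# Optional: connect adb to the emulator and launch BombSquad from a script.
import subprocess

EMULATOR_ADB_ADDR = "127.0.0.1:5555"   # assumption: default-style port; varies per emulator
PACKAGE = "net.froemling.bombsquad"    # package id from the Play Store link above

def adb(*args: str) -> None:
    result = subprocess.run(["adb", *args], capture_output=True, text=True, check=False)
    print(result.stdout or result.stderr)

adb("connect", EMULATOR_ADB_ADDR)
# `monkey` with the LAUNCHER category starts the app without knowing its activity name.
adb("shell", "monkey", "-p", PACKAGE, "-c", "android.intent.category.LAUNCHER", "1")
```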

      -

      Tips and tricks for playing BombSquad on PC

      -

      Now that you have BombSquad on your PC, you can enjoy blowing up your friends in various mini-games. However, if you want to improve your skills and have more fun, you should know some tips and tricks that can help you. Here are some of them:

      -

      Use a controller for better control

      -

      One of the advantages of playing BombSquad on PC is that you can use a controller instead of a touch screen or a keyboard. A controller can give you better control over your character's movements, actions, and power-ups. You can use any controller that is compatible with your PC or emulator, such as an Xbox, PlayStation, or Nintendo controller. You can also customize the controller settings in the game options.

      -

      Use power-ups wisely and strategically

      -

      Power-ups are essential for winning in BombSquad. They can give you an edge over your opponents or help you escape from tricky situations. However, you should not use them randomly or wastefully. You should use them wisely and strategically, depending on the game mode, the map, and the situation. For example, you should use ice bombs to freeze your enemies or slow them down, sticky bombs to attach them to walls or objects, land mines to trap them or block their paths, boxing gloves to knock them out or push them off cliffs, shields to protect yourself from attacks or explosions, health packs to heal yourself or your teammates, and so on.

      -

      Know your basic power-ups and their effects

      -

      BombSquad has many power-ups that can affect your character or your enemies in different ways. You should know what each power-up does and how to use it effectively. Here are some of the basic power-ups and their effects:

      | Power-up | Effect |
      | --- | --- |
      | Bomb | The most common power-up in BombSquad. It explodes after a few seconds or when it hits something. It can damage or kill anyone nearby. |
      | Ice bomb | A blue bomb that freezes anyone it hits or anyone near its explosion radius. Frozen characters cannot move or act until they thaw out. |
      | Sticky bomb | A green bomb that sticks to anything it touches, including characters, walls, objects, or other bombs. It explodes after a few seconds or when it is triggered by another explosion. It can be used to trap or sabotage your enemies. |
      | Land mine | A small device that is planted on the ground and explodes when someone steps on it or when it is triggered by another explosion. It can be used to defend your territory or surprise your enemies. |
      | Boxing glove | A large glove that can be used to punch your enemies or objects. It can knock out your enemies or push them away from you. It can also be used to deflect bombs or other projectiles. |
      | Shield | A circular device that creates a protective barrier around you. It can block bombs, bullets, punches, and other attacks. It can also reflect some projectiles back to their source. However, it has a limited duration and can be destroyed by powerful explosions. |
      | Health pack | A red cross that can restore some of your health or your teammates' health. It can be used to heal yourself or your allies when you are injured or low on health. |
      -

      Fire bombs at large groups of enemies or flags

      -

      One of the best ways to use bombs in BombSquad is to fire them at large groups of enemies or flags. This can cause massive damage and chaos, as well as score you points or objectives. For example, in Capture the Flag mode, you can fire bombs at the enemy flag carrier or the enemy base to prevent them from capturing your flag or to capture theirs. In Deathmatch mode, you can fire bombs at clusters of enemies to kill them or weaken them. In King of the Hill mode, you can fire bombs at the hill to clear it of enemies or to defend it from invaders.

      -

      Move frantically to get rid of sticky bombs

      -

      One of the most annoying power-ups in BombSquad is the sticky bomb. It can stick to you and explode after a few seconds, damaging or killing you and anyone near you. However, there is a way to get rid of sticky bombs: move frantically. If you move fast enough and in different directions, you can shake off the sticky bomb and make it fall off you. You can also try to stick it to someone else or something else before it explodes.

      -

      Conclusion

      -

      BombSquad is a fun and exciting game that lets you blow up your friends in various mini-games. You can play it on your PC with the help of an emulator, which will allow you to enjoy the game on a bigger screen and with better control. You can also use some tips and tricks to improve your gameplay and have more fun. So what are you waiting for? Download BombSquad for PC today and start bombing!

      -

      FAQs

      -

      Here are some frequently asked questions about BombSquad:

      -
        -
      • Q: How many players can play BombSquad on PC?
      • -
      • A: You can play with up to 8 players locally or online on PC, using touch screens, controllers, or even phones and tablets as controllers via the free 'BombSquad Remote' app.
      • -
      • Q: How do I create my own levels in BombSquad?
      • -
      • A: You can create your own levels in BombSquad using the built-in level editor. You can access it from the main menu by selecting "Create/Edit Levels". You can then design your own maps, add objects, power-ups, enemies, and more.
      • -
      • Q: How do I join or host an online game in BombSquad?
      • -
      • A: You can join or host an online game in BombSquad by selecting "Gather" from the main menu. You can then choose to join an existing game or create your own game. You can also invite your friends to join your game by sending them a code or a link.
      • -
      • Q: How do I customize my character in BombSquad?
      • -
      • A: You can customize your character in BombSquad by selecting "Settings" from the main menu and then choosing "Appearance". You can then change your character's name, color, outfit, head, eyes, hair, beard, glasses, hat, and taunt.
      • -
      • Q: How do I unlock more characters and power-ups in BombSquad?
      • -
      • A: You can unlock more characters and power-ups in BombSquad by playing the game and earning tickets. Tickets are currency that you can use to buy new characters and power-ups from the store. You can also earn tickets by watching ads or completing offers.
      • -
      -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download FRAG Pro Shooter MOD APK 3.9.0 with Unlimited Money.md b/spaces/fatiXbelha/sd/Download FRAG Pro Shooter MOD APK 3.9.0 with Unlimited Money.md deleted file mode 100644 index 068f564fbe4966de2a35108268cd100928fe1132..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download FRAG Pro Shooter MOD APK 3.9.0 with Unlimited Money.md +++ /dev/null @@ -1,55 +0,0 @@ - -

      FRAG Pro Shooter MOD APK: A Fun and Exciting Online Shooter Game

      -

      If you are looking for a game that combines the thrill of shooting, the strategy of team-building, and the fun of online multiplayer, then you should try FRAG Pro Shooter. This game is an online first-person 3D shooter that lets you customize your own team of heroes, challenge other players in real-time matches, collect and upgrade over 80 characters, and enjoy stunning graphics and sound effects. In this article, we will tell you more about this game and how you can download and install FRAG Pro Shooter MOD APK, which gives you unlimited money and no ads.

      -

      frag pro shooter mod apkdone


      Download Zip ✶✶✶ https://urllie.com/2uNEzq



      -

      What is FRAG Pro Shooter?

      -

FRAG Pro Shooter is a game developed by Oh BiBi, a French studio that specializes in mobile games. The game was released in 2019 and has since gained over 70 million downloads and a 4.5-star rating on the Google Play Store. The game is inspired by popular shooter games like Overwatch, Fortnite, and Call of Duty, but with a unique twist: you can switch between different heroes in your team during the match, each with their own skills and abilities. You can also create your own team of heroes, choosing from over 80 characters with different roles, such as attackers, defenders, campers, or runners.

      -

      Features of FRAG Pro Shooter

      -

      FRAG Pro Shooter has many features that make it a fun and exciting game to play. Here are some of them:

      -

      Customize your own team of heroes

      -

      You can create your own team of five heroes, each with their own strengths and weaknesses. You can choose from over 80 characters, each with their own personality, design, and voice. You can also customize their appearance, weapons, cards, and skins. You can experiment with different combinations of heroes to find the best strategy for each match.

      -

      -

      Challenge other players in real-time matches

      -

      You can compete with other players from around the world in real-time matches. You can join solo or duo modes, or create your own club with your friends. You can also participate in events, tournaments, and leagues to win rewards and rank up. You can chat with other players in the game or on social media platforms like Discord or YouTube.

      -

      Collect and upgrade over 80 characters

      -

      You can collect new characters by opening chests or buying them with coins or gems. You can also upgrade your characters by using cards or coins. Upgrading your characters will increase their stats, such as health, damage, speed, and range. You can also unlock new skills and abilities for your characters as you level them up.

      -

      Enjoy stunning graphics and sound effects

      -

      FRAG Pro Shooter has amazing graphics and sound effects that make the game more immersive and realistic. The game has colorful and detailed 3D graphics that show the different environments, characters, weapons, and effects. The game also has dynamic and lively sound effects that match the actions and events in the game. You can hear the gunshots, explosions, screams, cheers, and taunts of the characters.

      -

      What is FRAG Pro Shooter MOD APK?

      -

FRAG Pro Shooter MOD APK is a modified version of the game that adds benefits not available in the original, namely unlimited money and no ads. With unlimited money, you can buy anything you want in the game, such as chests, characters, skins, cards, and more, and upgrade your characters faster and more easily. With no ads, you can enjoy the game without any interruptions or distractions, and you also save data and battery life by not watching them.

      -

      Benefits of FRAG Pro Shooter MOD APK

      -

      FRAG Pro Shooter MOD APK has many benefits that make it a better choice than the original game. Here are some of them:

      -

      Unlimited money to buy anything you want

      -

      With FRAG Pro Shooter MOD APK, you will have unlimited money in the game. You can use this money to buy anything you want, such as chests, characters, skins, cards, and more. You can also use this money to upgrade your characters faster and easier. You don't have to worry about running out of money or spending real money to buy gems or coins. You can enjoy the game without any limitations or restrictions.

      -

      No ads to interrupt your gameplay

      -

      With FRAG Pro Shooter MOD APK, you will not see any ads in the game. You can enjoy the game without any interruptions or distractions. You don't have to watch ads to get rewards or bonuses. You don't have to wait for ads to load or skip them. You can also save your data and battery life by not watching ads. You can have a smooth and seamless gaming experience.

      -

      Easy to install and use

      -

      FRAG Pro Shooter MOD APK is easy to install and use. You don't need to root your device or use any other tools or apps to install it. You just need to download the APK file from a trusted source and follow the simple steps to install it on your device. You don't need to create an account or sign in to play the game. You just need to launch the game and enjoy.

      -

      How to download and install FRAG Pro Shooter MOD APK?

      -

      If you want to download and install FRAG Pro Shooter MOD APK on your Android device, you need to follow these steps:

      -

      Step-by-step guide for Android devices

      -

      Allow unknown sources on your device

      -

      Before you can install FRAG Pro Shooter MOD APK on your device, you need to allow unknown sources on your device. This will enable you to install apps that are not from the Google Play Store. To do this, you need to go to your device settings and look for the security or privacy option. Then, you need to find the unknown sources option and enable it. This will allow you to install FRAG Pro Shooter MOD APK on your device.

      -

      Download the APK file from a trusted source

      -

      Next, you need to download the APK file of FRAG Pro Shooter MOD APK from a trusted source. You can search for it on Google or use the link provided below. Make sure that you download the latest version of the APK file that is compatible with your device. Also, make sure that you download it from a safe and secure website that does not contain any viruses or malware.
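      Before installing, you can also print the APK's signing certificate to see who actually signed the file you downloaded. The sketch below uses apksigner, which ships with the Android SDK build-tools; it assumes apksigner is on your PATH, and the file name is a placeholder.

```python
# Optional sanity check: print the signing certificate of the downloaded APK.
# Assumes `apksigner` (Android SDK build-tools) is installed and on PATH.
import subprocess

APK_PATH = "frag-pro-shooter-mod.apk"  # assumption: your downloaded file

result = subprocess.run(
    ["apksigner", "verify", "--print-certs", APK_PATH],
    capture_output=True, text=True, check=False,
)
print(result.stdout or result.stderr)
```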

      -

      Locate and install the APK file on your device

      -

      After you have downloaded the APK file of FRAG Pro Shooter MOD APK, you need to locate and install it on your device. To do this, you need to go to your file manager and look for the folder where you saved the APK file. Then, you need to tap on the APK file and follow the instructions on the screen to install it on your device. This may take a few seconds or minutes depending on your device speed and performance.

      -

      Launch the game and enjoy

      -

      Finally, you can launch the game and enjoy it on your device. You will see that you have unlimited money and no ads in the game. You can buy anything you want, upgrade your characters, challenge other players, and have fun with FRAG Pro Shooter MOD APK.

      -

      Conclusion

      -

      FRAG Pro Shooter is a fun and exciting online shooter game that lets you customize your own team of heroes, challenge other players in real-time matches, collect and upgrade over 80 characters, and enjoy stunning graphics and sound effects. If you want to have more benefits in the game, such as unlimited money and no ads, you can download and install FRAG Pro Shooter MOD APK on your Android device. This is a modified version of the original game that gives you these extra benefits for free. You just need to follow the simple steps above to download and install FRAG Pro Shooter MOD APK on your device.

      -

      We hope that this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you. Thank you for reading and happy gaming!

      -

      Here are some FAQs that you might find useful:

      -

      FAQs

      -

      Is FRAG Pro Shooter MOD APK safe to use?

      -

      Yes, FRAG Pro Shooter MOD APK is safe to use as long as you download it from a trusted source and follow the installation steps correctly. However, you should always be careful when downloading and installing any modded or hacked apps on your device, as they may contain viruses or malware that can harm your device or compromise your privacy. You should also avoid using FRAG Pro Shooter MOD APK on your main account or device, as it may get banned or blocked by the game developers or Google.

      -

      Can I play FRAG Pro Shooter MOD APK with my friends?

      -

      Yes, you can play FRAG Pro Shooter MOD APK with your friends, as long as they also have the same version of the modded app installed on their devices. You can join or create clubs with your friends and challenge other players in real-time matches. You can also chat with your friends in the game or on social media platforms like Discord or YouTube.

      -

      What are the best characters to use in FRAG Pro Shooter?

      -

      The best characters to use in FRAG Pro Shooter depend on your personal preference, play style, and strategy. However, some of the most popular and powerful characters in the game are Jet, Lolly Pop, Andrometa, R0N1N, and Dr. Frost. These characters have high damage, range, speed, and skills that can help you win matches. You can also try different combinations of characters to find the best team for each match.

      -

      How can I get more chests, coins, gems, and cards in FRAG Pro Shooter?

      -

      You can get more chests, coins, gems, and cards in FRAG Pro Shooter by playing the game regularly, completing missions and challenges, participating in events and tournaments, ranking up in leagues, watching ads, and inviting friends. You can also use FRAG Pro Shooter MOD APK to get unlimited money and no ads in the game.

      -

      How can I update FRAG Pro Shooter MOD APK?

      -

      You can update FRAG Pro Shooter MOD APK by downloading the latest version of the APK file from a trusted source and installing it on your device. You should always check for updates regularly to enjoy the latest features and bug fixes of the game. However, you should also backup your game data before updating to avoid losing your progress or settings.

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download and Install Shape-shifting on PC with MuMu Player - The Most Excellent Android Emulator.md b/spaces/fatiXbelha/sd/Download and Install Shape-shifting on PC with MuMu Player - The Most Excellent Android Emulator.md deleted file mode 100644 index 7bbf64d269b7e69a313ab2e783bcce5d20139750..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download and Install Shape-shifting on PC with MuMu Player - The Most Excellent Android Emulator.md +++ /dev/null @@ -1,169 +0,0 @@ - -

      Shape-shifting PC Download: How to Play This Amazing Racing Game on Your Computer

      -

      If you are looking for a fun and challenging racing game that lets you transform your character into different shapes and vehicles, then you should try Shape-shifting. This game is developed by Sixcube and is available for Android devices. However, if you want to enjoy this game on a bigger screen with better graphics and controls, you can also play it on your PC using an emulator. In this article, we will show you how to download and play Shape-shifting on PC with three popular emulators: BlueStacks, MuMu Player, and LDPlayer.

      -

      What is Shape-shifting?

      -

      A brief introduction to the game and its features

      -

      Shape-shifting is a racing game that tests your strategy and reaction skills. The goal of the game is to shape-shift your character as many times as necessary to adapt to the environment and reach the finish line before your opponents. You can choose from various elements such as helicopters, cars, boats, or stick figures, depending on the terrain and obstacles. The game has exciting levels with different environments and challenges. You can also customize your character with different skins and accessories.

      -

      shape-shifting pc download


      Download ->->->-> https://urllie.com/2uNDc0



      -

      Why play Shape-shifting on PC?

      -

      Benefits of using an emulator to play Android games on PC

      -

      Playing Shape-shifting on PC has many advantages over playing it on your mobile device. Here are some of them:

      -
        -
      • You can enjoy the game on a larger screen with higher resolution and quality.
      • -
      • You can use your keyboard, mouse, or gamepad to control your character more precisely and comfortably.
      • -
      • You can avoid the battery drain, overheating, or data consumption issues that may occur on your mobile device.
      • -
      • You can access various emulator features that can enhance your gaming experience and performance.
      • -
      -

      Comparison of different emulators for Shape-shifting

      -

      There are many emulators that can run Android games on PC, but not all of them are compatible or optimized for Shape-shifting. Here are three of the best emulators that we recommend for playing this game:

      | Emulator | Pros | Cons |
      | --- | --- | --- |
      | BlueStacks | The most popular and trusted Android emulator for PC; supports high-definition graphics and high FPS; has many advanced features such as Macros, Multi Instance, Multi Instance Sync, Script, etc.; easy to install and use | Requires a high-end PC to run smoothly; may have some compatibility issues with some games or apps |
      | MuMu Player | A fast and lightweight Android emulator for PC; supports custom control and image recognition function; has many useful features such as Multi-drive Operation, Record, Big Screen/High FPS, etc.; compatible with most games and apps | Has fewer features than BlueStacks; may have some bugs or glitches |
      | LDPlayer | A powerful and stable Android emulator for PC; supports keyboard mapping and gamepad support; has many features such as High Performance Mode, Synchronizer, Operation Recorder, etc.; compatible with most games and apps | May consume more CPU and RAM than other emulators; may have some ads or pop-ups |
      -

      You can choose any of these emulators based on your preference and PC specifications. In the following sections, we will show you how to download and play Shape-shifting on PC with each of them.

      -

      How to download and play Shape-shifting on PC with BlueStacks

      -

      Step-by-step guide to install and run BlueStacks on PC

      -

      To play Shape-shifting on PC with BlueStacks, you need to follow these steps:

      -
        -
      1. Download the latest version of BlueStacks from its official website: https://www.bluestacks.com/
      2. -
      3. Run the installer and follow the instructions to complete the installation process.
      4. -
      5. Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.
      6. -
      7. Go to the Google Play Store and search for Shape-shifting. Alternatively, you can use the search bar on the home screen and type "Shape-shifting".
      8. -
      9. Click on the game icon and then click on "Install" to download and install the game.
      10. -
      11. Once the installation is done, you can find the game icon on the home screen or in the "My Games" tab. Click on it to launch the game.
      12. -
      -

      How to use BlueStacks features to enhance your gaming experience

      -

      BlueStacks has many features that can help you play Shape-shifting better on PC. Here are some of them:

      -

      Macros

      -

      This feature allows you to record and execute a sequence of actions with a single keystroke. You can use it to automate repetitive tasks or create custom combos. For example, you can record a macro that shape-shifts your character into a car, then a boat, then a helicopter, and assign it to a key. Then, you can press that key whenever you need to perform that sequence in the game.

      -

      Multi Instance

      -

      This feature allows you to run multiple instances of BlueStacks on your PC. You can use it to play different games or apps simultaneously, or play the same game with multiple accounts. For example, you can use it to play Shape-shifting with your friends or family members on the same PC.

      -

      Multi Instance Sync

      -

      This feature allows you to synchronize the actions of multiple instances of BlueStacks. You can use it to replicate the same gameplay across all instances. For example, you can use it to play Shape-shifting with multiple accounts and shape-shift all of them at the same time.

      -

      shape-shifting game download for pc
      -shape-shifting pc emulator
      -shape-shifting racing game for pc
      -how to play shape-shifting on pc
      -shape-shifting android game on pc
      -shape-shifting pc version
      -shape-shifting free download for pc
      -shape-shifting pc gameplay
      -shape-shifting pc requirements
      -shape-shifting pc online
      -shape-shifting pc review
      -shape-shifting pc cheats
      -shape-shifting pc mods
      -shape-shifting pc controller support
      -shape-shifting pc steam
      -shape-shifting pc windows 10
      -shape-shifting pc bluestacks
      -shape-shifting pc mumu player
      -shape-shifting pc ldplayer
      -shape-shifting pc nox player
      -shape-shifting pc memu play
      -shape-shifting pc apk download
      -shape-shifting pc full version
      -shape-shifting pc crack
      -shape-shifting pc patch
      -shape-shifting pc update
      -shape-shifting pc tips and tricks
      -shape-shifting pc guide
      -shape-shifting pc walkthrough
      -shape-shifting pc best settings
      -shape-shifting pc high fps
      -shape-shifting pc graphics quality
      -shape-shifting pc sound effects
      -shape-shifting pc music soundtrack
      -shape-shifting pc characters list
      -shape-shifting pc levels map
      -shape-shifting pc transform options
      -shape-shifting pc strategy guide
      -shape-shifting pc multiplayer mode
      -shape-shifting pc offline mode
      -shape-shifting pc custom mode
      -shape-shifting pc fun mode
      -shape-shifting pc hard mode
      -shape-shifting pc easy mode
      -shape-shifting pc endless mode
      -shape-shifting pc challenge mode
      -shape-shifting pc bonus mode
      -shape-shifting pc achievements list

      -

      Script

      -

      This feature allows you to write and execute scripts that can automate various tasks or commands in the game. You can use it to create custom macros, shortcuts, or functions. For example, you can write a script that automatically shape-shifts your character into the best element for each level.

      -

      How to download and play Shape-shifting on PC with MuMu Player

      -

      Step-by-step guide to install and run MuMu Player on PC

      -

      To play Shape-shifting on PC with MuMu Player, you need to follow these steps:

      -
        -
      1. Download the latest version of MuMu Player from its official website: https://mumu.163.com/en/
      2. -
      3. Run the installer and follow the instructions to complete the installation process.
      4. -
      5. Launch MuMu Player and sign in with your Google account. If you don't have one, you can create one for free.
      6. -
      7. Go to the Google Play Store and search for Shape-shifting. Alternatively, you can use the search bar on the home screen and type "Shape-shifting".
      8. -
      9. Click on the game icon and then click on "Install" to download and install the game.
      10. -
      11. Once the installation is done, you can find the game icon on the home screen or in the "My Games" tab. Click on it to launch the game.
      12. -
      -

      How to use MuMu Player features to enhance your gaming experience

      -

      MuMu Player has many features that can help you play Shape-shifting better on PC. Here are some of them:

      -

      Custom Control

      -

      This feature allows you to customize the keyboard and mouse settings for each game. You can adjust the sensitivity, key mapping, and mouse mode according to your preference. You can also use the image recognition function to automatically detect and match the game interface. For example, you can use this feature to set the keys for shape-shifting, accelerating, and braking in Shape-shifting.

      -

      Multi-drive Operation

      -

      This feature allows you to create and manage multiple emulators on your PC. You can use it to run different games or apps simultaneously, or play the same game with multiple accounts. For example, you can use it to play Shape-shifting with your friends or family members on the same PC.

      -

      Record

      -

      This feature allows you to record your gameplay and save it as a video file. You can use it to capture your best moments, share your tips and tricks, or create tutorials for other players. For example, you can use it to record your shape-shifting skills and show them off to your friends.

      -

      Big Screen/High FPS

      -

      This feature allows you to enjoy the game on a larger screen with higher resolution and quality. You can use it to optimize the game performance and graphics settings for your PC. For example, you can use it to play Shape-shifting in full screen mode with high FPS and smooth animation.

      -

      How to download and play Shape-shifting on PC with LDPlayer

      -

      Step-by-step guide to install and run LDPlayer on PC

      -

      To play Shape-shifting on PC with LDPlayer, you need to follow these steps:

      -
        -
      1. Download the latest version of LDPlayer from its official website: https://www.ldplayer.net/
      2. -
      3. Run the installer and follow the instructions to complete the installation process.
      4. -
      5. Launch LDPlayer and sign in with your Google account. If you don't have one, you can create one for free.
      6. -
      7. Go to the LD Store and search for Shape-shifting. Alternatively, you can use the search bar on the home screen and type "Shape-shifting".
      8. -
      9. Click on the game icon and then click on "Install" to download and install the game.
      10. -
      11. Once the installation is done, you can find the game icon on the home screen or in the "My Games" tab. Click on it to launch the game.
      12. -
      -

      How to use LDPlayer features to enhance your gaming experience

      -

      LDPlayer has many features that can help you play Shape-shifting better on PC. Here are some of them:

      -

      Keyboard Mapping

      -

      This feature allows you to customize the keyboard settings for each game. You can assign keys for different functions and actions in the game. You can also adjust the sensitivity and response time of the keys. For example, you can use this feature to set the keys for shape-shifting, accelerating, and braking in Shape-shifting.

      -

      Gamepad Support

      -

      This feature allows you to connect and use a gamepad or controller to play games on PC. You can choose from different gamepad models and modes according to your preference. You can also map buttons for different functions and actions in the game. For example, you can use this feature to play Shape-shifting with a gamepad or controller instead of a keyboard and mouse.

      -

      High Performance Mode

      -

      This feature allows you to boost the game performance and speed up the loading time. You can use it to optimize the CPU, RAM, and GPU usage for each game. You can also enable or disable some settings such as anti-aliasing, frame rate limit, etc. For example, you can use this feature to play Shape-shifting with high performance mode and enjoy a smooth gameplay.

      -

      Conclusion

      -

      Summary of the main points and recommendations

      -

      In conclusion, Shape-shifting is an amazing racing game that lets you transform your character into different shapes and vehicles. You can play this game on your PC using an emulator such as BlueStacks, MuMu Player, or LDPlayer. Each emulator has its own pros and cons, as well as features that can enhance your gaming experience. We recommend that you try them out and see which one suits you best. We hope that this article has helped you learn how to download and play Shape-shifting on PC with ease.

      -

      FAQs

      -

      Here are some frequently asked questions and answers about Shape-shifting and playing it on PC:

      -
        -
      1. Is Shape-shifting free to play?
        -Yes, Shape-shifting is a free-to-play game that you can download and play on your Android device or PC. However, the game may contain some in-app purchases or ads that you can choose to buy or watch to support the developers.
      2. -
      3. Can I play Shape-shifting offline?
        -Yes, you can play Shape-shifting offline without an internet connection. However, some features or functions may not be available or updated when you play offline. For example, you may not be able to access the online leaderboard, shop, or events.
      4. -
      5. How can I update Shape-shifting on PC?
        -To update Shape-shifting on PC, you need to update the emulator that you are using to play the game. You can check for updates on the emulator's website or app store. Alternatively, you can also update the game from the Google Play Store within the emulator.
      6. -
      7. How can I transfer my Shape-shifting progress from my mobile device to my PC?
        -To transfer your Shape-shifting progress from your mobile device to your PC, you need to sync your game data with your Google account. You can do this by logging in with the same Google account on both devices and enabling the cloud save option in the game settings. Then, you can load your game data from the cloud on your PC.
      8. -
      9. How can I contact the support team of Shape-shifting?
        -To contact the support team of Shape-shifting, you can send an email to shapeshifting@sixcube.com or visit their official website: https://www.sixcube.com/. You can also follow their social media accounts on Facebook, Twitter, and Instagram for more updates and news.
      10. -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/data/data_utils/token_type_utils.py b/spaces/fclong/summary/fengshen/data/data_utils/token_type_utils.py deleted file mode 100644 index 3b805d23b9aa4cda495d3b76ecba7effdc2854eb..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/data/data_utils/token_type_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id): - """Merge segments A and B, add [CLS] and [SEP] and build tokentypes.""" - - tokens = [] - tokentypes = [] - # [CLS]. - tokens.append(cls_id) - tokentypes.append(0) - # Segment A. - for token in tokens_a: - tokens.append(token) - tokentypes.append(0) - # [SEP]. - tokens.append(sep_id) - tokentypes.append(0) - # Segment B. - for token in tokens_b: - tokens.append(token) - tokentypes.append(1) - if tokens_b: - # [SEP]. - tokens.append(sep_id) - tokentypes.append(1) - - return tokens, tokentypes diff --git a/spaces/fclong/summary/fengshen/models/DAVAE/BertForLatentConnector.py b/spaces/fclong/summary/fengshen/models/DAVAE/BertForLatentConnector.py deleted file mode 100644 index 08dffce16874a4b263fb604380e5490645cb483e..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/models/DAVAE/BertForLatentConnector.py +++ /dev/null @@ -1,137 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""PyTorch BERT model. """ - -from __future__ import absolute_import, division, print_function, unicode_literals - -import json -import logging -import math -import os -import sys -from io import open - -import pdb - -import torch -from torch import nn -from transformers import BertConfig,BertPreTrainedModel -from transformers.models.bert.modeling_bert import BertEmbeddings,BertEncoder,BertPooler - - -class BertForLatentConnector(BertPreTrainedModel): - r""" - Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: - **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` - Sequence of hidden-states at the output of the last layer of the model. - **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` - Last layer hidden-state of the first token of the sequence (classification token) - further processed by a Linear layer and a Tanh activation function. The Linear - layer weights are trained from the next sentence prediction (classification) - objective during Bert pretraining. This output is usually *not* a good summary - of the semantic content of the input, you're often better with averaging or pooling - the sequence of hidden-states for the whole input sequence. 
- **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) - list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) - of shape ``(batch_size, sequence_length, hidden_size)``: - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - **attentions**: (`optional`, returned when ``config.output_attentions=True``) - list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. - - Examples:: - - tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') - model = BertModel.from_pretrained('bert-base-uncased') - input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 - outputs = model(input_ids) - last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple - - """ - def __init__(self, config, latent_size): - super(BertForLatentConnector, self).__init__(config) - - self.embeddings = BertEmbeddings(config) - self.encoder = BertEncoder(config) - self.pooler = BertPooler(config) - - self.linear = nn.Linear(config.hidden_size, 2 * latent_size, bias=False) - - self.init_weights() - - def _resize_token_embeddings(self, new_num_tokens): - old_embeddings = self.embeddings.word_embeddings - new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) - self.embeddings.word_embeddings = new_embeddings - return self.embeddings.word_embeddings - - def _prune_heads(self, heads_to_prune): - """ Prunes heads of the model. - heads_to_prune: dict of {layer_num: list of heads to prune in this layer} - See base class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, emb_noise=None): - if attention_mask is None: - attention_mask = torch.ones_like(input_ids) - if token_type_ids is None: - token_type_ids = torch.zeros_like(input_ids) - - # We create a 3D attention mask from a 2D tensor mask. - # Sizes are [batch_size, 1, 1, to_seq_length] - # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] - # this attention mask is more simple than the triangular masking of causal attention - # used in OpenAI GPT, we just need to prepare the broadcast dimension here. - extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. 
- extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - if head_mask is not None: - if head_mask.dim() == 1: - head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) - head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) - elif head_mask.dim() == 2: - head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer - head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility - else: - head_mask = [None] * self.config.num_hidden_layers - - embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) - - if emb_noise is not None: - embedding_output = embedding_output + emb_noise(embedding_output).to(embedding_output.dtype) - - encoder_outputs = self.encoder(embedding_output, - extended_attention_mask, - head_mask=head_mask) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) - - outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here - return outputs # sequence_output, pooled_output, (hidden_states), (attentions) diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Blob Hero APK for Android - Free Casual Game.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Blob Hero APK for Android - Free Casual Game.md deleted file mode 100644 index 4c9c185daee0a7adc3f25340864344794f0e3b53..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Blob Hero APK for Android - Free Casual Game.md +++ /dev/null @@ -1,110 +0,0 @@ -
      -

      Blob Hero APKPure: A Fun and Addictive Casual Game

      -

      If you are looking for a simple yet entertaining game to play on your Android device, you might want to check out Blob Hero APKPure. Blob Hero is a casual game developed by Rollic Games, where you control a blob of slime and try to save the world from evil blobs. In this article, we will tell you more about what Blob Hero is, what APKPure is, and how to download and install Blob Hero from APKPure.

      -

      What is Blob Hero?

      -

      Blob Hero is a game that combines physics, puzzle, and action elements. You play as a blob of slime that can change its shape and size by absorbing other blobs. Your goal is to use your blob powers to defeat the evil blobs that are trying to take over the world. You will encounter different types of enemies, obstacles, and environments as you progress through the levels.

      -

      blob hero apkpure


      Download ✸✸✸ https://gohhs.com/2uPo12



      -

      The gameplay of Blob Hero

      -

      The gameplay of Blob Hero is simple and intuitive. You just need to tap and drag on the screen to move your blob around. You can also swipe to make your blob jump or dash. You can absorb other blobs by touching them, which will make your blob bigger and stronger. However, you also need to be careful not to touch the spikes, lava, or other hazards that can harm your blob.

      -

      You can use your blob abilities to solve puzzles and overcome challenges. For example, you can use your blob to push buttons, activate switches, break walls, or bounce off platforms. You can also use your blob to fight against the evil blobs, who have different shapes and abilities. Some of them can shoot projectiles, explode, or split into smaller blobs. You need to use your blob skills and strategy to defeat them.

      -

      blob hero apk download
      -blob hero android game
      -blob hero mod apk
      -blob hero latest version
      -blob hero rollic games
      -blob hero free download
      -blob hero app store
      -blob hero online play
      -blob hero gameplay
      -blob hero tips and tricks
      -blob hero hack apk
      -blob hero unlimited money
      -blob hero review
      -blob hero rating
      -blob hero cheats
      -blob hero guide
      -blob hero walkthrough
      -blob hero levels
      -blob hero update
      -blob hero new features
      -blob hero best strategy
      -blob hero how to play
      -blob hero fun game
      -blob hero casual game
      -blob hero addictive game
      -blob hero apk mirror
      -blob hero apk pure com
      -blob hero for pc
      -blob hero for ios
      -blob hero for windows
      -blob hero for mac
      -blob hero for laptop
      -blob hero for tablet
      -blob hero for chromebook
      -blob hero for firestick
      -blob hero for smart tv
      -blob hero offline game
      -blob hero no wifi game
      -blob hero no ads game
      -blob hero premium game
      -blob hero pro game
      -blob hero full game
      -blob hero unlocked game
      -blob hero paid game
      -blob hero cracked game
      -blob hero patched game
      -blob hero obb file download

      -

      The features of Blob Hero

      -

      Blob Hero has many features that make it a fun and addictive game. Some of them are:

      -
      • Over 100 levels with varying difficulty and themes
      • Cute and colorful graphics and animations
      • Funny and quirky sound effects and music
      • Easy and smooth controls
      • Leaderboards and achievements
      • Regular updates with new levels and features
      -

      What is APKPure?

      -

      APKPure is a website and app that allows you to download and install Android apps and games for free. APKPure offers a large collection of apps and games that are not available on the Google Play Store or are region-locked. APKPure also provides fast and safe downloads, as well as updates for the apps and games you have installed.

      -

      The benefits of using APKPure

      -

      There are many benefits of using APKPure to download and install Android apps and games. Some of them are:

      -
      • You can access apps and games that are not available on the Google Play Store or in your region
      • You can download apps and games faster and more easily than from other sources
      • You can update your apps and games without waiting for Google Play Store approval
      • You can save storage space on your device by downloading only the files you need
      • You can enjoy a user-friendly interface and design
      • You can discover new and popular apps and games every day
      -

      How to download and install Blob Hero from APKPure

      -

      If you want to download and install Blob Hero from APKPure, you need to follow these steps:

      -
      1. Go to https://apkpure.com/blob-hero/com.quok.blobHero in your browser, or open the APKPure app on your device
      2. Tap the "Download APK" button or scan the QR code on the website
      3. Wait for the download to finish, then open the downloaded file
      4. If prompted, enable the installation of apps from unknown sources in your device settings
      5. Follow the on-screen instructions to install Blob Hero
      6. Enjoy playing Blob Hero on your device
      -
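      If you prefer to script the download step instead of tapping the button, the short Python sketch below shows one way to save the APK file. It is only a sketch: the URL is a placeholder, not a real APKPure endpoint, and it assumes you have already copied a direct download link from the page yourself. It uses the `requests` library and only saves the file; steps 4–6 above (allowing unknown sources and running the installer) still apply.

```python
# Minimal sketch: save an APK from a direct download link you copied yourself.
# The URL below is a placeholder, not a real APKPure endpoint.
import requests

APK_URL = "https://example.com/blob-hero.apk"  # placeholder direct link (assumption)
DEST = "blob-hero.apk"

def download_apk(url: str, dest: str) -> str:
    # Stream the response so a large APK never has to fit in memory at once.
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(dest, "wb") as fh:
            for chunk in resp.iter_content(chunk_size=8192):
                fh.write(chunk)
    return dest

if __name__ == "__main__":
    print(f"Saved APK to {download_apk(APK_URL, DEST)}")
```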

      Conclusion

      -

      Blob Hero APKPure is a fun and addictive casual game that you can download and install for free from APKPure. In Blob Hero you control a blob of slime and try to save the world from evil blobs, using your blob abilities to solve puzzles, overcome obstacles, and fight enemies. The game offers over 100 levels, cute graphics, funny sounds, easy controls, leaderboards, achievements, and regular updates. APKPure is a website and app that lets you download and install Android apps and games that are not available on the Google Play Store or in your region, and it also provides fast and safe downloads, updates, storage savings, a user-friendly interface, and daily recommendations.

      -

      Why you should try Blob Hero APKPure

      -

      You should try Blob Hero APKPure if you are looking for a game that:

      -
      • Is simple yet entertaining
      • Has physics, puzzle, and action elements
      • Has a lot of variety and challenge
      • Is free and easy to download and install
      • Is compatible with most Android devices
      -

      FAQs

      -

      Here are some frequently asked questions about Blob Hero APKPure:

      -
      1. What is the size of Blob Hero APKPure?
         The size of Blob Hero APKPure is about 65 MB.
      2. Is Blob Hero APKPure safe to download and install?
         Yes. APKPure verifies the security of all apps and games before uploading them to its website and app.
      3. How can I update Blob Hero APKPure?
         You can update Blob Hero through the APKPure app or website; you will get a notification when a new version is available.
      4. Can I play Blob Hero offline?
         Yes, you can play Blob Hero offline. However, you will need an internet connection for some features, such as leaderboards and achievements.
      5. Can I play Blob Hero with friends?
         No, Blob Hero does not have a multiplayer mode, but you can compare your scores and achievements with other players on the leaderboards.

      -
      -
      \ No newline at end of file diff --git a/spaces/fffffu/bing/src/components/ui/dialog.tsx b/spaces/fffffu/bing/src/components/ui/dialog.tsx deleted file mode 100644 index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000 --- a/spaces/fffffu/bing/src/components/ui/dialog.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DialogPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger - -const DialogPortal = ({ - className, - children, - ...props -}: DialogPrimitive.DialogPortalProps) => ( - -
      - {children} -
      -
      -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/fffiloni/ControlVideo/models/controlnet_attention.py b/spaces/fffiloni/ControlVideo/models/controlnet_attention.py deleted file mode 100644 index e45cde9a508b3b81c4359b3220aedf4d26edb3c5..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/ControlVideo/models/controlnet_attention.py +++ /dev/null @@ -1,483 +0,0 @@ -# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py - -from dataclasses import dataclass -from typing import Optional, Callable -import math -import torch -import torch.nn.functional as F -from torch import nn -from positional_encodings.torch_encodings import PositionalEncoding2D - -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers import ModelMixin -from diffusers.utils import BaseOutput -from diffusers.utils.import_utils import is_xformers_available -from diffusers.models.attention import CrossAttention, FeedForward, AdaLayerNorm -from einops import rearrange, repeat - - -@dataclass -class Transformer3DModelOutput(BaseOutput): - sample: torch.FloatTensor - - -if is_xformers_available(): - import xformers - import xformers.ops -else: - xformers = None - - -class Transformer3DModel(ModelMixin, ConfigMixin): - @register_to_config - def __init__( - self, - num_attention_heads: int = 16, - attention_head_dim: int = 88, - in_channels: Optional[int] = None, - num_layers: int = 1, - dropout: float = 0.0, - norm_num_groups: int = 32, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - use_linear_projection: bool = False, - only_cross_attention: bool = False, - upcast_attention: bool = False, - ): - super().__init__() - self.use_linear_projection = use_linear_projection - self.num_attention_heads = num_attention_heads - self.attention_head_dim = attention_head_dim - inner_dim = num_attention_heads * attention_head_dim - - # Define input layers - self.in_channels = in_channels - - self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) - if use_linear_projection: - self.proj_in = nn.Linear(in_channels, inner_dim) - else: - self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) - - # Define transformers blocks - self.transformer_blocks = nn.ModuleList( - [ - BasicTransformerBlock( - inner_dim, - num_attention_heads, - attention_head_dim, - dropout=dropout, - cross_attention_dim=cross_attention_dim, - activation_fn=activation_fn, - num_embeds_ada_norm=num_embeds_ada_norm, - attention_bias=attention_bias, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - ) - for d in range(num_layers) - ] - ) - - # 4. 
Define output layers - if use_linear_projection: - self.proj_out = nn.Linear(in_channels, inner_dim) - else: - self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) - - def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True): - # Input - assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}." - video_length = hidden_states.shape[2] - hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w") - encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length) - - batch, channel, height, weight = hidden_states.shape - residual = hidden_states - - hidden_states = self.norm(hidden_states) - if not self.use_linear_projection: - hidden_states = self.proj_in(hidden_states) - inner_dim = hidden_states.shape[1] - hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim) - else: - inner_dim = hidden_states.shape[1] - hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim) - hidden_states = self.proj_in(hidden_states) - - # Blocks - for block in self.transformer_blocks: - hidden_states = block( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - timestep=timestep, - video_length=video_length - ) - - # Output - if not self.use_linear_projection: - hidden_states = ( - hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous() - ) - hidden_states = self.proj_out(hidden_states) - else: - hidden_states = self.proj_out(hidden_states) - hidden_states = ( - hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous() - ) - - output = hidden_states + residual - - output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length) - if not return_dict: - return (output,) - - return Transformer3DModelOutput(sample=output) - - -class BasicTransformerBlock(nn.Module): - def __init__( - self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - dropout=0.0, - cross_attention_dim: Optional[int] = None, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - attention_bias: bool = False, - only_cross_attention: bool = False, - upcast_attention: bool = False, - ): - super().__init__() - self.only_cross_attention = only_cross_attention - self.use_ada_layer_norm = num_embeds_ada_norm is not None - - # Individual-Attn - self.attn1 = IndividualAttention( - query_dim=dim, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - cross_attention_dim=cross_attention_dim if only_cross_attention else None, - upcast_attention=upcast_attention, - ) - self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim) - - # Cross-Attn - if cross_attention_dim is not None: - self.attn2 = CrossAttention( - query_dim=dim, - cross_attention_dim=cross_attention_dim, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - upcast_attention=upcast_attention, - ) - else: - self.attn2 = None - - if cross_attention_dim is not None: - self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim) - else: - self.norm2 = None - - # Feed-forward - self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn) - self.norm3 = nn.LayerNorm(dim) - - self.norm_temp = AdaLayerNorm(dim, num_embeds_ada_norm) if 
self.use_ada_layer_norm else nn.LayerNorm(dim) - - def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None): - if not is_xformers_available(): - print("Here is how to install it") - raise ModuleNotFoundError( - "Refer to https://github.com/facebookresearch/xformers for more information on how to install" - " xformers", - name="xformers", - ) - elif not torch.cuda.is_available(): - raise ValueError( - "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is only" - " available for GPU " - ) - else: - try: - # Make sure we can run the memory efficient attention - _ = xformers.ops.memory_efficient_attention( - torch.randn((1, 2, 40), device="cuda"), - torch.randn((1, 2, 40), device="cuda"), - torch.randn((1, 2, 40), device="cuda"), - ) - except Exception as e: - raise e - self.attn1._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers - if self.attn2 is not None: - self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers - # self.attn_temp._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers - - def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, attention_mask=None, video_length=None): - # Individual-Attention - norm_hidden_states = ( - self.norm1(hidden_states, timestep) if self.use_ada_layer_norm else self.norm1(hidden_states) - ) - - if self.only_cross_attention: - hidden_states = ( - self.attn1(norm_hidden_states, encoder_hidden_states, attention_mask=attention_mask) + hidden_states - ) - else: - hidden_states = self.attn1(norm_hidden_states, attention_mask=attention_mask, video_length=video_length) + hidden_states - - if self.attn2 is not None: - # Cross-Attention - norm_hidden_states = ( - self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) - ) - hidden_states = ( - self.attn2( - norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask - ) - + hidden_states - ) - - # Feed-forward - hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states - - # # Temporal-Attention - # d = hidden_states.shape[1] - # hidden_states = rearrange(hidden_states, "(b f) d c -> (b d) f c", f=video_length) - # norm_hidden_states = ( - # self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states) - # ) - # hidden_states = self.attn_temp(norm_hidden_states) + hidden_states - # hidden_states = rearrange(hidden_states, "(b d) f c -> (b f) d c", d=d) - - return hidden_states - -class IndividualAttention(nn.Module): - r""" - A cross attention layer. - - Parameters: - query_dim (`int`): The number of channels in the query. - cross_attention_dim (`int`, *optional*): - The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`. - heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention. - dim_head (`int`, *optional*, defaults to 64): The number of channels in each head. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - bias (`bool`, *optional*, defaults to False): - Set to `True` for the query, key, and value linear layers to contain a bias parameter. 
- """ - - def __init__( - self, - query_dim: int, - cross_attention_dim: Optional[int] = None, - heads: int = 8, - dim_head: int = 64, - dropout: float = 0.0, - bias=False, - upcast_attention: bool = False, - upcast_softmax: bool = False, - added_kv_proj_dim: Optional[int] = None, - norm_num_groups: Optional[int] = None, - ): - super().__init__() - inner_dim = dim_head * heads - cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim - self.upcast_attention = upcast_attention - self.upcast_softmax = upcast_softmax - - self.scale = dim_head**-0.5 - - self.heads = heads - # for slice_size > 0 the attention score computation - # is split across the batch axis to save memory - # You can set slice_size with `set_attention_slice` - self.sliceable_head_dim = heads - self._slice_size = None - self._use_memory_efficient_attention_xformers = False - self.added_kv_proj_dim = added_kv_proj_dim - - if norm_num_groups is not None: - self.group_norm = nn.GroupNorm(num_channels=inner_dim, num_groups=norm_num_groups, eps=1e-5, affine=True) - else: - self.group_norm = None - - self.to_q = nn.Linear(query_dim, inner_dim, bias=bias) - self.to_k = nn.Linear(cross_attention_dim, inner_dim, bias=bias) - self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias) - - if self.added_kv_proj_dim is not None: - self.add_k_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim) - self.add_v_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim) - - self.to_out = nn.ModuleList([]) - self.to_out.append(nn.Linear(inner_dim, query_dim)) - self.to_out.append(nn.Dropout(dropout)) - - def reshape_heads_to_batch_dim(self, tensor): - batch_size, seq_len, dim = tensor.shape - head_size = self.heads - tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) - tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size) - return tensor - - def reshape_batch_dim_to_heads(self, tensor): - batch_size, seq_len, dim = tensor.shape - head_size = self.heads - tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) - tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) - return tensor - - def set_attention_slice(self, slice_size): - if slice_size is not None and slice_size > self.sliceable_head_dim: - raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.") - - self._slice_size = slice_size - - def _attention(self, query, key, value, attention_mask=None): - if self.upcast_attention: - query = query.float() - key = key.float() - - attention_scores = torch.baddbmm( - torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device), - query, - key.transpose(-1, -2), - beta=0, - alpha=self.scale, - ) - - if attention_mask is not None: - attention_scores = attention_scores + attention_mask - - if self.upcast_softmax: - attention_scores = attention_scores.float() - - attention_probs = attention_scores.softmax(dim=-1) - - # cast back to the original dtype - attention_probs = attention_probs.to(value.dtype) - - # compute attention output - hidden_states = torch.bmm(attention_probs, value) - - # reshape hidden_states - hidden_states = self.reshape_batch_dim_to_heads(hidden_states) - return hidden_states - - def _sliced_attention(self, query, key, value, sequence_length, dim, attention_mask): - batch_size_attention = query.shape[0] - hidden_states = torch.zeros( - (batch_size_attention, sequence_length, dim // 
self.heads), device=query.device, dtype=query.dtype - ) - slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0] - for i in range(hidden_states.shape[0] // slice_size): - start_idx = i * slice_size - end_idx = (i + 1) * slice_size - - query_slice = query[start_idx:end_idx] - key_slice = key[start_idx:end_idx] - - if self.upcast_attention: - query_slice = query_slice.float() - key_slice = key_slice.float() - - attn_slice = torch.baddbmm( - torch.empty(slice_size, query.shape[1], key.shape[1], dtype=query_slice.dtype, device=query.device), - query_slice, - key_slice.transpose(-1, -2), - beta=0, - alpha=self.scale, - ) - - if attention_mask is not None: - attn_slice = attn_slice + attention_mask[start_idx:end_idx] - - if self.upcast_softmax: - attn_slice = attn_slice.float() - - attn_slice = attn_slice.softmax(dim=-1) - - # cast back to the original dtype - attn_slice = attn_slice.to(value.dtype) - attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) - - hidden_states[start_idx:end_idx] = attn_slice - - # reshape hidden_states - hidden_states = self.reshape_batch_dim_to_heads(hidden_states) - return hidden_states - - def _memory_efficient_attention_xformers(self, query, key, value, attention_mask): - # TODO attention_mask - query = query.contiguous() - key = key.contiguous() - value = value.contiguous() - hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask) - hidden_states = self.reshape_batch_dim_to_heads(hidden_states) - return hidden_states - - def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None): - batch_size, sequence_length, _ = hidden_states.shape - - encoder_hidden_states = encoder_hidden_states - - if self.group_norm is not None: - hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) - - query = self.to_q(hidden_states) # (bf) x d(hw) x c - dim = query.shape[-1] - - query = self.reshape_heads_to_batch_dim(query) - - if self.added_kv_proj_dim is not None: - raise NotImplementedError - - encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states - key = self.to_k(encoder_hidden_states) - value = self.to_v(encoder_hidden_states) - - curr_frame_index = torch.arange(video_length) - - key = rearrange(key, "(b f) d c -> b f d c", f=video_length) - - key = key[:, curr_frame_index] - key = rearrange(key, "b f d c -> (b f) d c") - - value = rearrange(value, "(b f) d c -> b f d c", f=video_length) - - value = value[:, curr_frame_index] - value = rearrange(value, "b f d c -> (b f) d c") - - key = self.reshape_heads_to_batch_dim(key) - value = self.reshape_heads_to_batch_dim(value) - - if attention_mask is not None: - if attention_mask.shape[-1] != query.shape[1]: - target_length = query.shape[1] - attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) - attention_mask = attention_mask.repeat_interleave(self.heads, dim=0) - - # attention, what we cannot get enough of - if self._use_memory_efficient_attention_xformers: - hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask) - # Some versions of xformers return output in fp32, cast it back to the dtype of the input - hidden_states = hidden_states.to(query.dtype) - else: - if self._slice_size is None or query.shape[0] // self._slice_size == 1: - hidden_states = self._attention(query, key, value, attention_mask) - else: - hidden_states = self._sliced_attention(query, key, value, 
sequence_length, dim, attention_mask) - - # linear proj - hidden_states = self.to_out[0](hidden_states) - - # dropout - hidden_states = self.to_out[1](hidden_states) - return hidden_states diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/cookie/index.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/cookie/index.d.ts deleted file mode 100644 index a9690c3f0ea80fc3e9ec38211e5b42052500a221..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/cookie/index.d.ts +++ /dev/null @@ -1,135 +0,0 @@ -// Type definitions for cookie 0.4 -// Project: https://github.com/jshttp/cookie -// Definitions by: Pine Mizune -// Piotr Błażejewicz -// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped - -/** - * Basic HTTP cookie parser and serializer for HTTP servers. - */ - -/** - * Additional serialization options - */ -export interface CookieSerializeOptions { - /** - * Specifies the value for the {@link https://tools.ietf.org/html/rfc6265#section-5.2.3|Domain Set-Cookie attribute}. By default, no - * domain is set, and most clients will consider the cookie to apply to only - * the current domain. - */ - domain?: string | undefined; - - /** - * Specifies a function that will be used to encode a cookie's value. Since - * value of a cookie has a limited character set (and must be a simple - * string), this function can be used to encode a value into a string suited - * for a cookie's value. - * - * The default function is the global `encodeURIComponent`, which will - * encode a JavaScript string into UTF-8 byte sequences and then URL-encode - * any that fall outside of the cookie range. - */ - encode?(value: string): string; - - /** - * Specifies the `Date` object to be the value for the {@link https://tools.ietf.org/html/rfc6265#section-5.2.1|`Expires` `Set-Cookie` attribute}. By default, - * no expiration is set, and most clients will consider this a "non-persistent cookie" and will delete - * it on a condition like exiting a web browser application. - * - * *Note* the {@link https://tools.ietf.org/html/rfc6265#section-5.3|cookie storage model specification} - * states that if both `expires` and `maxAge` are set, then `maxAge` takes precedence, but it is - * possible not all clients by obey this, so if both are set, they should - * point to the same date and time. - */ - expires?: Date | undefined; - /** - * Specifies the boolean value for the {@link https://tools.ietf.org/html/rfc6265#section-5.2.6|`HttpOnly` `Set-Cookie` attribute}. - * When truthy, the `HttpOnly` attribute is set, otherwise it is not. By - * default, the `HttpOnly` attribute is not set. - * - * *Note* be careful when setting this to true, as compliant clients will - * not allow client-side JavaScript to see the cookie in `document.cookie`. - */ - httpOnly?: boolean | undefined; - /** - * Specifies the number (in seconds) to be the value for the `Max-Age` - * `Set-Cookie` attribute. The given number will be converted to an integer - * by rounding down. By default, no maximum age is set. - * - * *Note* the {@link https://tools.ietf.org/html/rfc6265#section-5.3|cookie storage model specification} - * states that if both `expires` and `maxAge` are set, then `maxAge` takes precedence, but it is - * possible not all clients by obey this, so if both are set, they should - * point to the same date and time. 
- */ - maxAge?: number | undefined; - /** - * Specifies the value for the {@link https://tools.ietf.org/html/rfc6265#section-5.2.4|`Path` `Set-Cookie` attribute}. - * By default, the path is considered the "default path". - */ - path?: string | undefined; - /** - * Specifies the boolean or string to be the value for the {@link https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-4.1.2.7|`SameSite` `Set-Cookie` attribute}. - * - * - `true` will set the `SameSite` attribute to `Strict` for strict same - * site enforcement. - * - `false` will not set the `SameSite` attribute. - * - `'lax'` will set the `SameSite` attribute to Lax for lax same site - * enforcement. - * - `'strict'` will set the `SameSite` attribute to Strict for strict same - * site enforcement. - * - `'none'` will set the SameSite attribute to None for an explicit - * cross-site cookie. - * - * More information about the different enforcement levels can be found in {@link https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-4.1.2.7|the specification}. - * - * *note* This is an attribute that has not yet been fully standardized, and may change in the future. This also means many clients may ignore this attribute until they understand it. - */ - sameSite?: true | false | 'lax' | 'strict' | 'none' | undefined; - /** - * Specifies the boolean value for the {@link https://tools.ietf.org/html/rfc6265#section-5.2.5|`Secure` `Set-Cookie` attribute}. When truthy, the - * `Secure` attribute is set, otherwise it is not. By default, the `Secure` attribute is not set. - * - * *Note* be careful when setting this to `true`, as compliant clients will - * not send the cookie back to the server in the future if the browser does - * not have an HTTPS connection. - */ - secure?: boolean | undefined; -} - -/** - * Additional parsing options - */ -export interface CookieParseOptions { - /** - * Specifies a function that will be used to decode a cookie's value. Since - * the value of a cookie has a limited character set (and must be a simple - * string), this function can be used to decode a previously-encoded cookie - * value into a JavaScript string or other object. - * - * The default function is the global `decodeURIComponent`, which will decode - * any URL-encoded sequences into their byte representations. - * - * *Note* if an error is thrown from this function, the original, non-decoded - * cookie value will be returned as the cookie's value. - */ - decode?(value: string): string; -} - -/** - * Parse an HTTP Cookie header string and returning an object of all cookie - * name-value pairs. - * - * @param str the string representing a `Cookie` header value - * @param [options] object containing parsing options - */ -export function parse(str: string, options?: CookieParseOptions): { [key: string]: string }; - -/** - * Serialize a cookie name-value pair into a `Set-Cookie` header string. 
- * - * @param name the name for the cookie - * @param value value to set the cookie to - * @param [options] object containing serialization options - * @throws {TypeError} when `maxAge` options is invalid - */ -export function serialize(name: string, value: string, options?: CookieSerializeOptions): string; diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/timers/promises.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/timers/promises.d.ts deleted file mode 100644 index c1450684d60a323526a9ae750669adb21ba75c17..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/timers/promises.d.ts +++ /dev/null @@ -1,93 +0,0 @@ -/** - * The `timers/promises` API provides an alternative set of timer functions - * that return `Promise` objects. The API is accessible via`require('timers/promises')`. - * - * ```js - * import { - * setTimeout, - * setImmediate, - * setInterval, - * } from 'timers/promises'; - * ``` - * @since v15.0.0 - */ -declare module 'timers/promises' { - import { TimerOptions } from 'node:timers'; - /** - * ```js - * import { - * setTimeout, - * } from 'timers/promises'; - * - * const res = await setTimeout(100, 'result'); - * - * console.log(res); // Prints 'result' - * ``` - * @since v15.0.0 - * @param [delay=1] The number of milliseconds to wait before fulfilling the promise. - * @param value A value with which the promise is fulfilled. - */ - function setTimeout(delay?: number, value?: T, options?: TimerOptions): Promise; - /** - * ```js - * import { - * setImmediate, - * } from 'timers/promises'; - * - * const res = await setImmediate('result'); - * - * console.log(res); // Prints 'result' - * ``` - * @since v15.0.0 - * @param value A value with which the promise is fulfilled. - */ - function setImmediate(value?: T, options?: TimerOptions): Promise; - /** - * Returns an async iterator that generates values in an interval of `delay` ms. - * - * ```js - * import { - * setInterval, - * } from 'timers/promises'; - * - * const interval = 100; - * for await (const startTime of setInterval(interval, Date.now())) { - * const now = Date.now(); - * console.log(now); - * if ((now - startTime) > 1000) - * break; - * } - * console.log(Date.now()); - * ``` - * @since v15.9.0 - */ - function setInterval(delay?: number, value?: T, options?: TimerOptions): AsyncIterable; - - interface Scheduler { - /** - * ```js - * import { scheduler } from 'node:timers/promises'; - * - * await scheduler.wait(1000); // Wait one second before continuing - * ``` - * An experimental API defined by the Scheduling APIs draft specification being developed as a standard Web Platform API. - * Calling timersPromises.scheduler.wait(delay, options) is roughly equivalent to calling timersPromises.setTimeout(delay, undefined, options) except that the ref option is not supported. - * @since v16.14.0 - * @experimental - * @param [delay=1] The number of milliseconds to wait before fulfilling the promise. - */ - wait: (delay?: number, options?: TimerOptions) => Promise; - /** - * An experimental API defined by the Scheduling APIs draft specification being developed as a standard Web Platform API. - * Calling timersPromises.scheduler.yield() is equivalent to calling timersPromises.setImmediate() with no arguments. 
- * @since v16.14.0 - * @experimental - */ - yield: () => Promise; - } - - const scheduler: Scheduler; -} -declare module 'node:timers/promises' { - export * from 'timers/promises'; -} diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-disposition/README.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-disposition/README.md deleted file mode 100644 index 3a0bb055949cdaed008f0f85e111624214213873..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-disposition/README.md +++ /dev/null @@ -1,142 +0,0 @@ -# content-disposition - -[![NPM Version][npm-image]][npm-url] -[![NPM Downloads][downloads-image]][downloads-url] -[![Node.js Version][node-version-image]][node-version-url] -[![Build Status][github-actions-ci-image]][github-actions-ci-url] -[![Test Coverage][coveralls-image]][coveralls-url] - -Create and parse HTTP `Content-Disposition` header - -## Installation - -```sh -$ npm install content-disposition -``` - -## API - -```js -var contentDisposition = require('content-disposition') -``` - -### contentDisposition(filename, options) - -Create an attachment `Content-Disposition` header value using the given file name, -if supplied. The `filename` is optional and if no file name is desired, but you -want to specify `options`, set `filename` to `undefined`. - -```js -res.setHeader('Content-Disposition', contentDisposition('∫ maths.pdf')) -``` - -**note** HTTP headers are of the ISO-8859-1 character set. If you are writing this -header through a means different from `setHeader` in Node.js, you'll want to specify -the `'binary'` encoding in Node.js. - -#### Options - -`contentDisposition` accepts these properties in the options object. - -##### fallback - -If the `filename` option is outside ISO-8859-1, then the file name is actually -stored in a supplemental field for clients that support Unicode file names and -a ISO-8859-1 version of the file name is automatically generated. - -This specifies the ISO-8859-1 file name to override the automatic generation or -disables the generation all together, defaults to `true`. - - - A string will specify the ISO-8859-1 file name to use in place of automatic - generation. - - `false` will disable including a ISO-8859-1 file name and only include the - Unicode version (unless the file name is already ISO-8859-1). - - `true` will enable automatic generation if the file name is outside ISO-8859-1. - -If the `filename` option is ISO-8859-1 and this option is specified and has a -different value, then the `filename` option is encoded in the extended field -and this set as the fallback field, even though they are both ISO-8859-1. - -##### type - -Specifies the disposition type, defaults to `"attachment"`. This can also be -`"inline"`, or any other value (all values except inline are treated like -`attachment`, but can convey additional information if both parties agree to -it). The type is normalized to lower-case. - -### contentDisposition.parse(string) - -```js -var disposition = contentDisposition.parse('attachment; filename="EURO rates.txt"; filename*=UTF-8\'\'%e2%82%ac%20rates.txt') -``` - -Parse a `Content-Disposition` header string. This automatically handles extended -("Unicode") parameters by decoding them and providing them under the standard -parameter name. 
This will return an object with the following properties (examples -are shown for the string `'attachment; filename="EURO rates.txt"; filename*=UTF-8\'\'%e2%82%ac%20rates.txt'`): - - - `type`: The disposition type (always lower case). Example: `'attachment'` - - - `parameters`: An object of the parameters in the disposition (name of parameter - always lower case and extended versions replace non-extended versions). Example: - `{filename: "€ rates.txt"}` - -## Examples - -### Send a file for download - -```js -var contentDisposition = require('content-disposition') -var destroy = require('destroy') -var fs = require('fs') -var http = require('http') -var onFinished = require('on-finished') - -var filePath = '/path/to/public/plans.pdf' - -http.createServer(function onRequest (req, res) { - // set headers - res.setHeader('Content-Type', 'application/pdf') - res.setHeader('Content-Disposition', contentDisposition(filePath)) - - // send file - var stream = fs.createReadStream(filePath) - stream.pipe(res) - onFinished(res, function () { - destroy(stream) - }) -}) -``` - -## Testing - -```sh -$ npm test -``` - -## References - -- [RFC 2616: Hypertext Transfer Protocol -- HTTP/1.1][rfc-2616] -- [RFC 5987: Character Set and Language Encoding for Hypertext Transfer Protocol (HTTP) Header Field Parameters][rfc-5987] -- [RFC 6266: Use of the Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)][rfc-6266] -- [Test Cases for HTTP Content-Disposition header field (RFC 6266) and the Encodings defined in RFCs 2047, 2231 and 5987][tc-2231] - -[rfc-2616]: https://tools.ietf.org/html/rfc2616 -[rfc-5987]: https://tools.ietf.org/html/rfc5987 -[rfc-6266]: https://tools.ietf.org/html/rfc6266 -[tc-2231]: http://greenbytes.de/tech/tc2231/ - -## License - -[MIT](LICENSE) - -[npm-image]: https://img.shields.io/npm/v/content-disposition.svg -[npm-url]: https://npmjs.org/package/content-disposition -[node-version-image]: https://img.shields.io/node/v/content-disposition.svg -[node-version-url]: https://nodejs.org/en/download -[coveralls-image]: https://img.shields.io/coveralls/jshttp/content-disposition.svg -[coveralls-url]: https://coveralls.io/r/jshttp/content-disposition?branch=master -[downloads-image]: https://img.shields.io/npm/dm/content-disposition.svg -[downloads-url]: https://npmjs.org/package/content-disposition -[github-actions-ci-image]: https://img.shields.io/github/workflow/status/jshttp/content-disposition/ci/master?label=ci -[github-actions-ci-url]: https://github.com/jshttp/content-disposition?query=workflow%3Aci diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/utils-merge/README.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/utils-merge/README.md deleted file mode 100644 index 0cb71171255f78b087185dba6e09f7bb6a74dd98..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/utils-merge/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# utils-merge - -[![Version](https://img.shields.io/npm/v/utils-merge.svg?label=version)](https://www.npmjs.com/package/utils-merge) -[![Build](https://img.shields.io/travis/jaredhanson/utils-merge.svg)](https://travis-ci.org/jaredhanson/utils-merge) -[![Quality](https://img.shields.io/codeclimate/github/jaredhanson/utils-merge.svg?label=quality)](https://codeclimate.com/github/jaredhanson/utils-merge) -[![Coverage](https://img.shields.io/coveralls/jaredhanson/utils-merge.svg)](https://coveralls.io/r/jaredhanson/utils-merge) 
-[![Dependencies](https://img.shields.io/david/jaredhanson/utils-merge.svg)](https://david-dm.org/jaredhanson/utils-merge) - - -Merges the properties from a source object into a destination object. - -## Install - -```bash -$ npm install utils-merge -``` - -## Usage - -```javascript -var a = { foo: 'bar' } - , b = { bar: 'baz' }; - -merge(a, b); -// => { foo: 'bar', bar: 'baz' } -``` - -## License - -[The MIT License](http://opensource.org/licenses/MIT) - -Copyright (c) 2013-2017 Jared Hanson <[http://jaredhanson.net/](http://jaredhanson.net/)> - - Sponsor diff --git a/spaces/fffiloni/controlnet-animation-doodle/public/apiCall.js b/spaces/fffiloni/controlnet-animation-doodle/public/apiCall.js deleted file mode 100644 index 5101cbfce001fbb0ff18d5561a71ed5652e0e260..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/public/apiCall.js +++ /dev/null @@ -1,142 +0,0 @@ -class GRAPI { - - constructor(URI, token){ - this.URI_endpoint = URI; - this.token = token; - - this.prompt_value = ''; - this.prompt_input = select('#prompt-inp') - .attribute('placeholder', 'PROMPT') - .input(this.set_prompt_value.bind(this)); - - this.neg_prompt_value = ''; - this.neg_prompt_input = select('#neg-prompt-inp') - .attribute('placeholder', 'NEGATIVE PROMPT') - .input(this.set_neg_prompt_value.bind(this)); - - this.call_API_btn = createButton('') - .mousePressed(this.call_API.bind(this)) - .attribute('title', 'Diffuse current frame') - .id('api-btn') - .parent('#inner-right'); - - this.running_API_btn = createButton('') - .id('running-api-btn') - .attribute('title', 'processing diffusion ...') - .parent('#inner-right') - .addClass('hide'); - - this.show_diffused = false; - this.showHide_diff_btn = createButton('') - .mousePressed(this.showHide_diff.bind(this)) - .attribute('title', 'Show/Hide diffused results ') - .id('show-hide-diff-btn') - .parent('#inner-left'); - - //this.API_log_txt = createDiv('logs') - //this.API_log_txt.id('api-logs') - - this.hiddenScribbleGraphics = createGraphics(width, height); - this.diffusedGraphics = createGraphics(width, height); - } - - set_prompt_value(){ - this.prompt_value = this.prompt_input.value(); - } - - set_neg_prompt_value(){ - this.neg_prompt_value = this.neg_prompt_input.value(); - } - - fakeCall(){ - this.running_API_btn.removeClass('hide'); - this.call_API_btn.addClass('hide'); - setTimeout(function(){ - this.call_API_btn.removeClass('hide'); - this.running_API_btn.addClass('hide'); - }.bind(this), 3000) - - } - - call_API(){ - - this.running_API_btn.removeClass('hide'); - this.call_API_btn.addClass('hide'); - - if(AS.framesList.length != 0) { - - console.log(AS.frame_displayed) - - this.hiddenScribbleGraphics.loadPixels(); - let frame1_data = this.hiddenScribbleGraphics.canvas.toDataURL('image/png'); - - let inputs = [ - this.prompt_value, - frame1_data, - this.neg_prompt_value - ]; - - this.query(inputs); - - console.log("API CALLED • Waiting for a response ... ") - console.log("PROMPT: " + this.prompt_value) - //this.API_log_txt.html('API CALLED • Waiting for a response ... 
') - } - - } - - async query(data) { - - const response = await fetch(this.URI_endpoint, { - method: "POST", - body: JSON.stringify( - { "data": data } // data images to send - ), - headers: { "Content-Type": "application/json", - "Authorization": "Bearer " + this.token + "" } - }) - - .then(function(response) { return response.json(); }) - - .then(function(json_response){ - - console.log("got results"); - - //this.API_log_txt.html('got results • hit Play Anim button !') - //console.log(json_response.data); - - setTimeout(function(){AS.display_frame(AS.frame_displayed)}, 100); - - loadImage(json_response.data[0], function(diff_img){ - AS.framesList[AS.frame_displayed].diffused_data = diff_img; - }); - - console.log('stored'); - this.call_API_btn.removeClass('hide'); - this.running_API_btn.addClass('hide'); - if(this.show_diffused == false){ - this.showHide_diff() - } - - }.bind(this)); - } - - showHide_diff(){ - - if(this.show_diffused == false){ - - this.show_diffused = true; - this.showHide_diff_btn.html(''); - - } else if(this.show_diffused == true){ - - this.show_diffused = false; - this.showHide_diff_btn.html(''); - - } - - setTimeout(AS.display_frame(AS.frame_displayed), 1000); - - } - - } \ No newline at end of file diff --git a/spaces/flax-community/Multilingual-VQA/multiapp.py b/spaces/flax-community/Multilingual-VQA/multiapp.py deleted file mode 100644 index 3750723e703ee4795dd00062f00729d64690117e..0000000000000000000000000000000000000000 --- a/spaces/flax-community/Multilingual-VQA/multiapp.py +++ /dev/null @@ -1,17 +0,0 @@ -import streamlit as st -from session import _get_state - -class MultiApp: - def __init__(self, state): - self.apps = [] - self.state = state - - def add_app(self, title, func): - self.apps.append({"title": title, "function": func}) - - def run(self): - st.sidebar.header("Go To:") - app = st.sidebar.radio( - "", self.apps, format_func=lambda app: app["title"] - ) - app["function"](self.state) diff --git a/spaces/flax-sentence-embeddings/sentence-embeddings/backend/config.py b/spaces/flax-sentence-embeddings/sentence-embeddings/backend/config.py deleted file mode 100644 index 0386f4efa5ed011a388a3ca2c045f3e96d590681..0000000000000000000000000000000000000000 --- a/spaces/flax-sentence-embeddings/sentence-embeddings/backend/config.py +++ /dev/null @@ -1,14 +0,0 @@ -MODELS_ID = dict(distilroberta = 'flax-sentence-embeddings/st-codesearch-distilroberta-base', - mpnet = 'flax-sentence-embeddings/all_datasets_v3_mpnet-base', - minilm_l6 = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L6') - -QA_MODELS_ID = dict( - mpnet_asymmetric_qa = ['flax-sentence-embeddings/multi-QA_v1-mpnet-asymmetric-Q', - 'flax-sentence-embeddings/multi-QA_v1-mpnet-asymmetric-A'], - mpnet_qa='flax-sentence-embeddings/mpnet_stackexchange_v1', - distilbert_qa = 'flax-sentence-embeddings/multi-qa_v1-distilbert-cls_dot' -) - -SEARCH_MODELS_ID = dict( - distilbert_qa = 'flax-sentence-embeddings/multi-qa_v1-distilbert-cls_dot' -) \ No newline at end of file diff --git a/spaces/floriankrempl/mtg_rules_bot/mtg/training/__init__.py b/spaces/floriankrempl/mtg_rules_bot/mtg/training/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/freddyaboulton/EDSR-freddy/README.md b/spaces/freddyaboulton/EDSR-freddy/README.md deleted file mode 100644 index 020d91b01312581d8a7297d992143b281d055156..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/EDSR-freddy/README.md +++ /dev/null @@ -1,28 +0,0 @@ 
---- -title: EDSR Keras -emoji: 🚀 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.1.4b3 -app_file: app.py -pinned: false -license: mit ---- - -This space is the demo for the EDSR (Enhanced Deep Residual Networks for Single Image Super-Resolution) model. This model surpassed the performace of the current available SOTA models. - -Paper Link - https://arxiv.org/pdf/1707.02921 - -Keras Example link - https://keras.io/examples/vision/edsr/ - - -TODO: - -Hack to make this work for any image size. Currently the model takes input of image size 150 x 150. -We pad the input image with transparant pixels so that it is a square image, which is a multiple of 150 x 150 -Then we chop the image into multiple 150 x 150 sub images -Upscale it and stich it together. - -The output image might look a bit off, because each sub-image dosent have data about other sub-images. -This approach assumes that the subimage has enough data about its surroundings diff --git a/spaces/ggwvits/vits-uma-genshin-honkai/text/__init__.py b/spaces/ggwvits/vits-uma-genshin-honkai/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/ggwvits/vits-uma-genshin-honkai/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/gorkemgoknar/metayazar/app.py b/spaces/gorkemgoknar/metayazar/app.py deleted file mode 100644 index e0829d3a699e258dc08aa8e26b258ef655562354..0000000000000000000000000000000000000000 --- a/spaces/gorkemgoknar/metayazar/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr - - -title = "Metayazar - Turkish AI Writer" -description = "Write something in Turkish and GPT2 will fill up the rest in Turkish. Bir kaç kelime yazın AI kalanını yazsın, daha kapsamlı örneğini www.metayazar.com üzerinde test edebilirsiniz." 
- - - -examples = [["Çok uzun zaman önce dağların ardında biri yaşarmış."], ["Buralar eskiden hep dutluktu."], ["Otur yeğenim, otur da konuşalım!"]] - -interface = gr.Interface.load("huggingface/gorkemgoknar/gpt2-turkish-writer", - description=description, - examples=examples -) - -interface.launch() diff --git a/spaces/gradio/HuBERT/examples/adaptive_span/README.md b/spaces/gradio/HuBERT/examples/adaptive_span/README.md deleted file mode 100644 index 913a87338633f8a790d70fe4133b8bd8b95a4c50..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/adaptive_span/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# Adaptive Span - -Adaptive Span is a novel self-attention mechanism that can learn its optimal -attention span. This allows us to extend significantly the maximum context size -used in Transformer, while maintaining control over their memory footprint -and computational time. It uses the Truncated BPTT technique for training, -as in [transformerXL](https://github.com/pytorch/fairseq/blob/master/examples/truncated_bptt/README.md). - -Adaptive Span was introduced by paper: -[Adaptive Attention Span in Transformers](https://arxiv.org/abs/1905.07799), -which achieved state-of-the-art language modeling results at the time of publication. - -We manage to reproduce their result in fairseq and keep most of the -[original implementation](https://github.com/facebookresearch/adaptive-span) untouched. -You can refer to the their sweep file as well if any combination of hyperparameter is not clear. - -##### 0. Setup - -First you need to process the Enwik8 dataset, we use the pre-tokenized dataset -from [adaptive span paper](https://github.com/facebookresearch/adaptive-span/blob/master/get_data.sh). -You can download the dataset, and then run: -```bash -fairseq-preprocess --only-source --trainpref ~/data/enwik8/train.txt \ - --validpref ~/data/enwik8/valid.txt --testpref ~/data/enwik8/test.txt \ - --destdir ~/data/enwik8/data-bin/ --joined-dictionary --workers 20 -``` - -##### 1. Train a Adaptive Span model on Enwik8 - -We will train a 12-layer Adaptive Span model following the [hyperparameters -used in the original -paper](https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8.sh). - -The following command assumes 4 GPUs, so that the total batch size is 64 -sequences (4 x 16). Training should take 2-3 days on 4 V100 GPUs: -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train \ - --user-dir examples/adaptive_span \ - --data ~/data/enwik8/data-bin/ \ - --fp16 --fp16-no-flatten-grads --max-update 600000 \ - --task truncated_bptt_lm --tokens-per-sample 512 --arch adaptive_span \ - --n-layer 12 --d-model 512 --n-head 8 --d-inner 2048 --dropout 0.3 \ - --attn-span 8192 --optimizer adagrad_with_grad_clip --adagrad-clip 0.03 \ - --validate-interval-updates 1000 \ - --lr-scheduler fixed --warmup-updates 32000 --batch-size-valid 32 \ - --lr 0.07 --criterion adaptive_span_loss --batch-size 16 --update-freq 1 \ - --seed 2 --log-format json --log-interval 25 --aux-loss-scaler 5e-07 -``` -This should land around 1.05 on validation, 1.03 on test. You can lower the ---aux-loss-scaler for better performance (longer span). It gives ~0.03 bpc -improvement to the transformerXL baseline here. -If training on a single GPU, set `--update-freq=4` to accumulate 4x gradients -and simulate training on 4 GPUs. -You can also reproduce the transformerXL result on enwik8 using this code base. 
-It should land around 1.06 on test,matching the [original paper](https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/run_enwik8_base.sh). -You can try by -```bash -CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train \ - --user-dir examples/truncated_bptt \ - ~/data/enwik8/data-bin/ \ - --task truncated_bptt_lm --fp16 --max-update 400000 \ - --tokens-per-sample 512 --arch transformer_xl --n-layer 12 \ - --d-model 512 --n-head 8 --d-head 64 --d-inner 2048 --dropout 0.1 \ - --dropatt 0.0 --mem-len 512 --optimizer adam --clip-norm 0.25 \ - --lr-scheduler cosine --warmup-updates 0 \ - --lr 0.0 --lr 0.00025 --batch-size 15 \ - --update-freq 1 --seed 2 --log-format json --log-interval 25 \ - --fp16 -``` - -##### 2. Evaluate -For Adaptive Span: -```bash -fairseq-eval-lm ~/data/enwik8/data-bin/ --path model/checkpoint_best.pt \ - --user-dir examples/adaptive_span \ - --task truncated_bptt_lm --batch-size 8 --tokens-per-sample 512 --gen-subset test -``` -For Transformer-XL evaluation: -```bash -fairseq-eval-lm ~/data/enwik8/data-bin/ --path model/checkpoint_best.pt \ - --user-dir examples/truncated_bptt/ --task truncated_bptt_lm --batch-size 8 \ - --tokens-per-sample 80 \ - --model-overrides '{"mem_len":2100,"clamp_len":820,"same_length":True}' \ - --gen-subset valid -``` - -*Note:* During training the model saw 512 tokens of context -(``--tokens-per-sample=512``), with batch size 8. These settings match the evaluation -settings from [the original -paper](https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8.sh). diff --git a/spaces/gradio/blocks_joined/README.md b/spaces/gradio/blocks_joined/README.md deleted file mode 100644 index 3ab21932f62b83206c180f21f64482583de7932a..0000000000000000000000000000000000000000 --- a/spaces/gradio/blocks_joined/README.md +++ /dev/null @@ -1,12 +0,0 @@ - ---- -title: blocks_joined -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 4.1.2 -app_file: run.py -pinned: false -hf_oauth: true ---- diff --git a/spaces/gradio/bokeh_plots/app.py b/spaces/gradio/bokeh_plots/app.py deleted file mode 100644 index c8e68af8b37090ebb6a270bd3c2d9c3a1fe4e188..0000000000000000000000000000000000000000 --- a/spaces/gradio/bokeh_plots/app.py +++ /dev/null @@ -1,93 +0,0 @@ -import gradio as gr -import xyzservices.providers as xyz -from bokeh.plotting import figure -from bokeh.tile_providers import get_provider -from bokeh.models import ColumnDataSource, Whisker -from bokeh.plotting import figure -from bokeh.sampledata.autompg2 import autompg2 as df -from bokeh.sampledata.penguins import data -from bokeh.transform import factor_cmap, jitter, factor_mark - - -def get_plot(plot_type): - if plot_type == "map": - tile_provider = get_provider(xyz.OpenStreetMap.Mapnik) - plot = figure( - x_range=(-2000000, 6000000), - y_range=(-1000000, 7000000), - x_axis_type="mercator", - y_axis_type="mercator", - ) - plot.add_tile(tile_provider) - return plot - elif plot_type == "whisker": - classes = list(sorted(df["class"].unique())) - - p = figure( - height=400, - x_range=classes, - background_fill_color="#efefef", - title="Car class vs HWY mpg with quintile ranges", - ) - p.xgrid.grid_line_color = None - - g = df.groupby("class") - upper = g.hwy.quantile(0.80) - lower = g.hwy.quantile(0.20) - source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower)) - - error = Whisker( - base="base", - upper="upper", - lower="lower", - source=source, - level="annotation", - line_width=2, - ) - error.upper_head.size = 20 - error.lower_head.size = 20 
- p.add_layout(error) - - p.circle( - jitter("class", 0.3, range=p.x_range), - "hwy", - source=df, - alpha=0.5, - size=13, - line_color="white", - color=factor_cmap("class", "Light6", classes), - ) - return p - elif plot_type == "scatter": - - SPECIES = sorted(data.species.unique()) - MARKERS = ["hex", "circle_x", "triangle"] - - p = figure(title="Penguin size", background_fill_color="#fafafa") - p.xaxis.axis_label = "Flipper Length (mm)" - p.yaxis.axis_label = "Body Mass (g)" - - p.scatter( - "flipper_length_mm", - "body_mass_g", - source=data, - legend_group="species", - fill_alpha=0.4, - size=12, - marker=factor_mark("species", MARKERS, SPECIES), - color=factor_cmap("species", "Category10_3", SPECIES), - ) - - p.legend.location = "top_left" - p.legend.title = "Species" - return p - -with gr.Blocks() as demo: - with gr.Row(): - plot_type = gr.Radio(value="scatter", choices=["scatter", "whisker", "map"]) - plot = gr.Plot() - plot_type.change(get_plot, inputs=[plot_type], outputs=[plot]) - - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/gunti/ChatGPT4/app.py b/spaces/gunti/ChatGPT4/app.py deleted file mode 100644 index 119b1be22c9e79b16ac00069c023ed110b9093da..0000000000000000000000000000000000000000 --- a/spaces/gunti/ChatGPT4/app.py +++ /dev/null @@ -1,141 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Testing with my Open AI Key -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - payload = { - "model": "gpt-4", - "messages": [{"role": "user", "content": f"{inputs}"}], - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - - print(f"chat_counter - {chat_counter}") - if chat_counter != 0 : - messages=[] - for data in chatbot: - temp1 = {} - temp1["role"] = "user" - temp1["content"] = data[0] - temp2 = {} - temp2["role"] = "assistant" - temp2["content"] = data[1] - messages.append(temp1) - messages.append(temp2) - temp3 = {} - temp3["role"] = "user" - temp3["content"] = inputs - messages.append(temp3) - #messages - payload = { - "model": "gpt-4", - "messages": messages, #[{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - - chat_counter+=1 - - history.append(inputs) - print(f"payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - print(f"response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - #counter+=1 - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0: - # break - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + 
partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - - -def reset_textbox(): - return gr.update(value='') - -title = """

      🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming

      """ -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: -``` -User: -Assistant: -User: -Assistant: -... -``` -In this app, you can explore the outputs of a gpt-4 LLM. -""" - -theme = gr.themes.Default(primary_hue="green") - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

      🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌

      """) - gr.HTML('''
      Duplicate the Space and run securely with your OpenAI API Key
      ''') - with gr.Column(elem_id = "col_container"): - #GPT4 API Key is provided by Huggingface - #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here") - chatbot = gr.Chatbot(elem_id='chatbot') #c - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t - state = gr.State([]) #s - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",) - #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", ) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #gr.Markdown(description) - demo.queue(max_size=20, concurrency_count=10).launch(debug=True) diff --git a/spaces/hamacojr/CAT-Seg/open_clip/src/open_clip/__init__.py b/spaces/hamacojr/CAT-Seg/open_clip/src/open_clip/__init__.py deleted file mode 100644 index 3cf72e9280c90bdfeaced30750650ef0f9021c3d..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/CAT-Seg/open_clip/src/open_clip/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD -from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer -from .factory import list_models, add_model_config, get_model_config, load_checkpoint -from .loss import ClipLoss -from .model import CLIP, CustomTextCLIP, CLIPTextCfg, CLIPVisionCfg,\ - convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype -from .openai import load_openai_model, list_openai_models -from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model,\ - get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained -from .tokenizer import SimpleTokenizer, tokenize -from .transform import image_transform, AugmentationCfg diff --git a/spaces/hamacojr/SAM-CAT-Seg/run.sh b/spaces/hamacojr/SAM-CAT-Seg/run.sh deleted file mode 100644 index 9dc719114b97b429c4eb7713a19a7c939aca0333..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/SAM-CAT-Seg/run.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -gpus=4 -config=$1 -output=$2 - -if [ -z $config ] -then - echo "No config file found! Run with "sh run.sh [CONFIG_FILE] [OUTPUT_DIR] [OPTS]"" - exit 0 -fi - -if [ -z $output ] -then - echo "No output directory found! 
Run with "sh run.sh [CONFIG_FILE] [OUTPUT_DIR] [OPTS]"" - exit 0 -fi - -shift 2 -opts=${@} - -python train_net.py --config $config \ - --num-gpus $gpus \ - --dist-url "auto" \ - --resume \ - OUTPUT_DIR $output \ - $opts - -sh eval.sh $config $output $opts \ No newline at end of file diff --git a/spaces/harpreetsahota/RAQA-with-LlamaIndex-and-a-fine-tuned-GPT-35/app.py b/spaces/harpreetsahota/RAQA-with-LlamaIndex-and-a-fine-tuned-GPT-35/app.py deleted file mode 100644 index 814e433775e32e01f5ffe365b4afee41962019cf..0000000000000000000000000000000000000000 --- a/spaces/harpreetsahota/RAQA-with-LlamaIndex-and-a-fine-tuned-GPT-35/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import openai - -from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine -from llama_index.callbacks.base import CallbackManager -from llama_index import ( - LLMPredictor, - ServiceContext, - StorageContext, - load_index_from_storage, -) -from llama_index.llms import OpenAI -import chainlit as cl - - -openai.api_key = os.environ.get("OPENAI_API_KEY") - -try: - # rebuild storage context - storage_context = StorageContext.from_defaults(persist_dir="./storage") - # load index - index = load_index_from_storage(storage_context) -except: - from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader - - documents = SimpleDirectoryReader(input_files=["hitchhikers.pdf"]).load_data() - index = GPTVectorStoreIndex.from_documents(documents) - index.storage_context.persist() - - -@cl.on_chat_start -async def factory(): - llm_predictor = LLMPredictor( - llm=OpenAI( - temperature=0, - model="ft:gpt-3.5-turbo-0613:personal::7ru6l1bi", - streaming=True, - context_window=2048, - ), - ) - service_context = ServiceContext.from_defaults( - llm_predictor=llm_predictor, - chunk_size=512, - callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]), - ) - - query_engine = index.as_query_engine( - service_context=service_context, - streaming=True, - ) - - cl.user_session.set("query_engine", query_engine) - - -@cl.on_message -async def main(message): - query_engine = cl.user_session.get("query_engine") # type: RetrieverQueryEngine - response = await cl.make_async(query_engine.query)(message) - - response_message = cl.Message(content="") - - for token in response.response_gen: - await response_message.stream_token(token=token) - - if response.response_txt: - response_message.content = response.response_txt - - await response_message.send() \ No newline at end of file diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/meta_arch/semantic_seg.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/meta_arch/semantic_seg.py deleted file mode 100644 index 2c41a7235cb9c578e2c6de5835854bdff7493616..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/meta_arch/semantic_seg.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import numpy as np -from typing import Dict -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.layers import Conv2d, ShapeSpec -from detectron2.structures import ImageList -from detectron2.utils.registry import Registry - -from ..backbone import build_backbone -from ..postprocessing import sem_seg_postprocess -from .build import META_ARCH_REGISTRY - -__all__ = ["SemanticSegmentor", "SEM_SEG_HEADS_REGISTRY", "SemSegFPNHead", "build_sem_seg_head"] - - -SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS") -SEM_SEG_HEADS_REGISTRY.__doc__ = """ -Registry for semantic segmentation heads, which make semantic segmentation predictions -from feature maps. -""" - - -@META_ARCH_REGISTRY.register() -class SemanticSegmentor(nn.Module): - """ - Main class for semantic segmentation architectures. - """ - - def __init__(self, cfg): - super().__init__() - self.backbone = build_backbone(cfg) - self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape()) - self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) - self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) - - @property - def device(self): - return self.pixel_mean.device - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - - For now, each item in the list is a dict that contains: - - * "image": Tensor, image in (C, H, W) format. - * "sem_seg": semantic segmentation ground truth - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model, used in inference. - See :meth:`postprocess` for details. - - Returns: - list[dict]: - Each dict is the output for one input image. - The dict contains one key "sem_seg" whose value is a - Tensor that represents the - per-pixel segmentation prediced by the head. - The prediction has shape KxHxW that represents the logits of - each class for each pixel. - """ - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors(images, self.backbone.size_divisibility) - - features = self.backbone(images.tensor) - - if "sem_seg" in batched_inputs[0]: - targets = [x["sem_seg"].to(self.device) for x in batched_inputs] - targets = ImageList.from_tensors( - targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value - ).tensor - else: - targets = None - results, losses = self.sem_seg_head(features, targets) - - if self.training: - return losses - - processed_results = [] - for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): - height = input_per_image.get("height") - width = input_per_image.get("width") - r = sem_seg_postprocess(result, image_size, height, width) - processed_results.append({"sem_seg": r}) - return processed_results - - -def build_sem_seg_head(cfg, input_shape): - """ - Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. - """ - name = cfg.MODEL.SEM_SEG_HEAD.NAME - return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) - - -@SEM_SEG_HEADS_REGISTRY.register() -class SemSegFPNHead(nn.Module): - """ - A semantic segmentation head described in :paper:`PanopticFPN`. - It takes FPN features as input and merges information from all - levels of the FPN into single output. 
- """ - - def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): - super().__init__() - - # fmt: off - self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES - feature_strides = {k: v.stride for k, v in input_shape.items()} - feature_channels = {k: v.channels for k, v in input_shape.items()} - self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE - num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES - conv_dims = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM - self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE - norm = cfg.MODEL.SEM_SEG_HEAD.NORM - self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT - # fmt: on - - self.scale_heads = [] - for in_feature in self.in_features: - head_ops = [] - head_length = max( - 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)) - ) - for k in range(head_length): - norm_module = nn.GroupNorm(32, conv_dims) if norm == "GN" else None - conv = Conv2d( - feature_channels[in_feature] if k == 0 else conv_dims, - conv_dims, - kernel_size=3, - stride=1, - padding=1, - bias=not norm, - norm=norm_module, - activation=F.relu, - ) - weight_init.c2_msra_fill(conv) - head_ops.append(conv) - if feature_strides[in_feature] != self.common_stride: - head_ops.append( - nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) - ) - self.scale_heads.append(nn.Sequential(*head_ops)) - self.add_module(in_feature, self.scale_heads[-1]) - self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) - weight_init.c2_msra_fill(self.predictor) - - def forward(self, features, targets=None): - """ - Returns: - In training, returns (None, dict of losses) - In inference, returns (CxHxW logits, {}) - """ - x = self.layers(features) - if self.training: - return None, self.losses(x, targets) - else: - x = F.interpolate( - x, scale_factor=self.common_stride, mode="bilinear", align_corners=False - ) - return x, {} - - def layers(self, features): - for i, f in enumerate(self.in_features): - if i == 0: - x = self.scale_heads[i](features[f]) - else: - x = x + self.scale_heads[i](features[f]) - x = self.predictor(x) - return x - - def losses(self, predictions, targets): - predictions = F.interpolate( - predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False - ) - loss = F.cross_entropy( - predictions, targets, reduction="mean", ignore_index=self.ignore_value - ) - losses = {"loss_sem_seg": loss * self.loss_weight} - return losses diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/PointRend/point_rend/coarse_mask_head.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/PointRend/point_rend/coarse_mask_head.py deleted file mode 100644 index 3f1cffb4c985dc3121a863eb7b378965b718a19d..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/PointRend/point_rend/coarse_mask_head.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.layers import Conv2d, ShapeSpec -from detectron2.modeling import ROI_MASK_HEAD_REGISTRY - - -@ROI_MASK_HEAD_REGISTRY.register() -class CoarseMaskHead(nn.Module): - """ - A mask head with fully connected layers. 
Given pooled features it first reduces channels and - spatial dimensions with conv layers and then uses FC layers to predict coarse masks analogously - to the standard box head. - """ - - def __init__(self, cfg, input_shape: ShapeSpec): - """ - The following attributes are parsed from config: - conv_dim: the output dimension of the conv layers - fc_dim: the feature dimenstion of the FC layers - num_fc: the number of FC layers - output_side_resolution: side resolution of the output square mask prediction - """ - super(CoarseMaskHead, self).__init__() - - # fmt: off - self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES - conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM - self.fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM - num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC - self.output_side_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION - self.input_channels = input_shape.channels - self.input_h = input_shape.height - self.input_w = input_shape.width - # fmt: on - - self.conv_layers = [] - if self.input_channels > conv_dim: - self.reduce_channel_dim_conv = Conv2d( - self.input_channels, - conv_dim, - kernel_size=1, - stride=1, - padding=0, - bias=True, - activation=F.relu, - ) - self.conv_layers.append(self.reduce_channel_dim_conv) - - self.reduce_spatial_dim_conv = Conv2d( - conv_dim, conv_dim, kernel_size=2, stride=2, padding=0, bias=True, activation=F.relu - ) - self.conv_layers.append(self.reduce_spatial_dim_conv) - - input_dim = conv_dim * self.input_h * self.input_w - input_dim //= 4 - - self.fcs = [] - for k in range(num_fc): - fc = nn.Linear(input_dim, self.fc_dim) - self.add_module("coarse_mask_fc{}".format(k + 1), fc) - self.fcs.append(fc) - input_dim = self.fc_dim - - output_dim = self.num_classes * self.output_side_resolution * self.output_side_resolution - - self.prediction = nn.Linear(self.fc_dim, output_dim) - # use normal distribution initialization for mask prediction layer - nn.init.normal_(self.prediction.weight, std=0.001) - nn.init.constant_(self.prediction.bias, 0) - - for layer in self.conv_layers: - weight_init.c2_msra_fill(layer) - for layer in self.fcs: - weight_init.c2_xavier_fill(layer) - - def forward(self, x): - # unlike BaseMaskRCNNHead, this head only outputs intermediate - # features, because the features will be used later by PointHead. - N = x.shape[0] - x = x.view(N, self.input_channels, self.input_h, self.input_w) - for layer in self.conv_layers: - x = layer(x) - x = torch.flatten(x, start_dim=1) - for layer in self.fcs: - x = F.relu(layer(x)) - return self.prediction(x).view( - N, self.num_classes, self.output_side_resolution, self.output_side_resolution - ) diff --git a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/libJPG/jpgd.h b/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/libJPG/jpgd.h deleted file mode 100644 index a1c0cac61839a6f66a42c341f50d5e36faad9a93..0000000000000000000000000000000000000000 --- a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/libJPG/jpgd.h +++ /dev/null @@ -1,316 +0,0 @@ -// jpgd.h - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -#ifndef JPEG_DECODER_H -#define JPEG_DECODER_H - -#include -#include -#include - -namespace jpgd -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef unsigned short uint16; - typedef unsigned int uint; - typedef signed int int32; - - // Loads a JPEG image from a memory buffer or a file. - // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA). 
- // On return, width/height will be set to the image's dimensions, and actual_comps will be set to the either 1 (grayscale) or 3 (RGB). - // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly. - // Requesting a 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp. -// BEGIN EPIC MOD -//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps); - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format); -// END EPIC MOD - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps); - - // Success/failure error codes. - enum jpgd_status - { - JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1, - JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE, - JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS, - JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH, - JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER, - JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS, - JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE, - JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR, - JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM - }; - - // Input stream interface. - // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available. - // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set. - // It the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer. - // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding. - class jpeg_decoder_stream - { - public: - jpeg_decoder_stream() { } - virtual ~jpeg_decoder_stream() { } - - // The read() method is called when the internal input buffer is empty. - // Parameters: - // pBuf - input buffer - // max_bytes_to_read - maximum bytes that can be written to pBuf - // pEOF_flag - set this to true if at end of stream (no more bytes remaining) - // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0). - // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full. - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0; - }; - - // stdio FILE stream class. 
- class jpeg_decoder_file_stream : public jpeg_decoder_stream - { - jpeg_decoder_file_stream(const jpeg_decoder_file_stream &); - jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &); - - FILE *m_pFile; - bool m_eof_flag, m_error_flag; - - public: - jpeg_decoder_file_stream(); - virtual ~jpeg_decoder_file_stream(); - - bool open(const char *Pfilename); - void close(); - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Memory stream class. - class jpeg_decoder_mem_stream : public jpeg_decoder_stream - { - const uint8 *m_pSrc_data; - uint m_ofs, m_size; - - public: - jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { } - jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { } - - virtual ~jpeg_decoder_mem_stream() { } - - bool open(const uint8 *pSrc_data, uint size); - void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; } - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Loads JPEG file from a jpeg_decoder_stream. - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps); - - enum - { - JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4, - JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384 - }; - - typedef int16 jpgd_quant_t; - typedef int16 jpgd_block_t; - - class jpeg_decoder - { - public: - // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc. - // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline. - jpeg_decoder(jpeg_decoder_stream *pStream); - - ~jpeg_decoder(); - - // Call this method after constructing the object to begin decompression. - // If JPGD_SUCCESS is returned you may then call decode() on each scanline. - int begin_decoding(); - - // Returns the next scan line. - // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1). - // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4). - // Returns JPGD_SUCCESS if a scan line has been returned. - // Returns JPGD_DONE if all scan lines have been returned. - // Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info. - int decode(const void** pScan_line, uint* pScan_line_len); - - inline jpgd_status get_error_code() const { return m_error_code; } - - inline int get_width() const { return m_image_x_size; } - inline int get_height() const { return m_image_y_size; } - - inline int get_num_components() const { return m_comps_in_frame; } - - inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; } - inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); } - - // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file). 
- inline int get_total_bytes_read() const { return m_total_bytes_read; } - - private: - jpeg_decoder(const jpeg_decoder &); - jpeg_decoder &operator =(const jpeg_decoder &); - - typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int); - - struct huff_tables - { - bool ac_table; - uint look_up[256]; - uint look_up2[256]; - uint8 code_size[256]; - uint tree[512]; - }; - - struct coeff_buf - { - uint8 *pData; - int block_num_x, block_num_y; - int block_len_x, block_len_y; - int block_size; - }; - - struct mem_block - { - mem_block *m_pNext; - size_t m_used_count; - size_t m_size; - char m_data[1]; - }; - - jmp_buf m_jmp_state; - mem_block *m_pMem_blocks; - int m_image_x_size; - int m_image_y_size; - jpeg_decoder_stream *m_pStream; - int m_progressive_flag; - uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES]; - uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size - uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size - jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables - int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported) - int m_comps_in_frame; // # of components in frame - int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor - int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor - int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector - int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID - int m_comp_h_blocks[JPGD_MAX_COMPONENTS]; - int m_comp_v_blocks[JPGD_MAX_COMPONENTS]; - int m_comps_in_scan; // # of components in scan - int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan - int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector - int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector - int m_spectral_start; // spectral selection start - int m_spectral_end; // spectral selection end - int m_successive_low; // successive approximation low - int m_successive_high; // successive approximation high - int m_max_mcu_x_size; // MCU's max. X size in pixels - int m_max_mcu_y_size; // MCU's max. 
Y size in pixels - int m_blocks_per_mcu; - int m_max_blocks_per_row; - int m_mcus_per_row, m_mcus_per_col; - int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU]; - int m_total_lines_left; // total # lines left in image - int m_mcu_lines_left; // total # lines left in this MCU - int m_real_dest_bytes_per_scan_line; - int m_dest_bytes_per_scan_line; // rounded up - int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y) - huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES]; - coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS]; - coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS]; - int m_eob_run; - int m_block_y_mcu[JPGD_MAX_COMPONENTS]; - uint8* m_pIn_buf_ofs; - int m_in_buf_left; - int m_tem_flag; - bool m_eof_flag; - uint8 m_in_buf_pad_start[128]; - uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128]; - uint8 m_in_buf_pad_end[128]; - int m_bits_left; - uint m_bit_buf; - int m_restart_interval; - int m_restarts_left; - int m_next_restart_num; - int m_max_mcus_per_row; - int m_max_blocks_per_mcu; - int m_expanded_blocks_per_mcu; - int m_expanded_blocks_per_row; - int m_expanded_blocks_per_component; - bool m_freq_domain_chroma_upsample; - int m_max_mcus_per_col; - uint m_last_dc_val[JPGD_MAX_COMPONENTS]; - jpgd_block_t* m_pMCU_coefficients; - int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU]; - uint8* m_pSample_buf; - int m_crr[256]; - int m_cbb[256]; - int m_crg[256]; - int m_cbg[256]; - uint8* m_pScan_line_0; - uint8* m_pScan_line_1; - jpgd_status m_error_code; - bool m_ready_flag; - int m_total_bytes_read; - - void free_all_blocks(); - // BEGIN EPIC MOD - UE_NORETURN void stop_decoding(jpgd_status status); - // END EPIC MOD - void *alloc(size_t n, bool zero = false); - void word_clear(void *p, uint16 c, uint n); - void prep_in_buffer(); - void read_dht_marker(); - void read_dqt_marker(); - void read_sof_marker(); - void skip_variable_marker(); - void read_dri_marker(); - void read_sos_marker(); - int next_marker(); - int process_markers(); - void locate_soi_marker(); - void locate_sof_marker(); - int locate_sos_marker(); - void init(jpeg_decoder_stream * pStream); - void create_look_ups(); - void fix_in_buffer(); - void transform_mcu(int mcu_row); - void transform_mcu_expand(int mcu_row); - coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y); - inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y); - void load_next_row(); - void decode_next_row(); - void make_huff_table(int index, huff_tables *pH); - void check_quant_tables(); - void check_huff_tables(); - void calc_mcu_block_order(); - int init_scan(); - void init_frame(); - void process_restart(); - void decode_scan(pDecode_block_func decode_block_func); - void init_progressive(); - void init_sequential(); - void decode_start(); - void decode_init(jpeg_decoder_stream * pStream); - void H2V2Convert(); - void H2V1Convert(); - void H1V2Convert(); - void H1V1Convert(); - void gray_convert(); - void expanded_convert(); - void find_eoi(); - inline uint get_char(); - inline uint get_char(bool *pPadding_flag); - inline void stuff_char(uint8 q); - inline uint8 get_octet(); - inline uint get_bits(int num_bits); - inline uint get_bits_no_markers(int numbits); - inline int huff_decode(huff_tables *pH); - inline int huff_decode(huff_tables *pH, int& extrabits); - static inline uint8 clamp(int i); - static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void 
decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - }; - -} // namespace jpgd - -#endif // JPEG_DECODER_H diff --git a/spaces/hectorduran/wavescomparing/README.md b/spaces/hectorduran/wavescomparing/README.md deleted file mode 100644 index 938884b9bf551ba7a315e5d03c41ed918970e626..0000000000000000000000000000000000000000 --- a/spaces/hectorduran/wavescomparing/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Wavescomparing -emoji: 📈 -colorFrom: yellow -colorTo: blue -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false -license: cc-by-nc-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hf4h/bio-chem-foundation-models/README.md b/spaces/hf4h/bio-chem-foundation-models/README.md deleted file mode 100644 index 54096e6362451f7aacc4927293ad31d2117b752e..0000000000000000000000000000000000000000 --- a/spaces/hf4h/bio-chem-foundation-models/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Explore Biology & Biochem Foundation Models -emoji: 🧬 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: true -duplicated_from: hf-ml4h/biomedical-language-models ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/hhalim/datavis-plotly/app.py b/spaces/hhalim/datavis-plotly/app.py deleted file mode 100644 index 69e116662af67e4ebad20c4628e3877621635867..0000000000000000000000000000000000000000 --- a/spaces/hhalim/datavis-plotly/app.py +++ /dev/null @@ -1,56 +0,0 @@ -import streamlit as st -import numpy as np -import plotly.express as px -import pandas as pd -import plotly.graph_objects as go - -st.set_page_config(page_title="Plotly Graphing Libraries",layout='wide') - -# https://plotly.com/python/treemaps/ - -df = px.data.tips() -fig = px.treemap(df, path=[px.Constant("all"), 'sex', 'day', 'time'], - values='total_bill', color='time', - color_discrete_map={'(?)':'lightgrey', 'Lunch':'gold', 'Dinner':'darkblue'}) -fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) -#fig.show() -fig.update_traces(marker=dict(cornerradius=5)) - -st.plotly_chart(fig, use_container_width=True) - - -df = px.data.gapminder().query("year == 2007") -fig = px.treemap(df, path=[px.Constant("world"), 'continent', 'country'], values='pop', - color='lifeExp', hover_data=['iso_alpha'], - color_continuous_scale='RdBu', - color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop'])) -fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) -#fig.show() -st.plotly_chart(fig, use_container_width=True) - - -df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/96c0bd/sunburst-coffee-flavors-complete.csv') -fig = go.Figure(go.Treemap( - ids = df.ids, - labels = df.labels, - parents = df.parents, - pathbar_textfont_size=15, - root_color="lightgrey" -)) -fig.update_layout( - uniformtext=dict(minsize=10, mode='hide'), - margin = dict(t=50, l=25, r=25, b=25) -) -#fig.show() -st.plotly_chart(fig, use_container_width=True) - - -df = pd.read_pickle('bloom_dataset.pkl') -fig = px.treemap(df, path=[px.Constant("ROOTS"), 'Macroarea', 'Family', 'Genus', 'Language', 'dataset_name'], - values='num_bytes', maxdepth=4) -fig.update_traces(root_color="pink") -fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) - -st.plotly_chart(fig, 
use_container_width=True) - - diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/old/__init__.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/old/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_recon_1.sh b/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_recon_1.sh deleted file mode 100644 index 58808ff711cd8e8e6a23d1fa62d86f4d692ac813..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/scripts_new/run_glacier_mtl_recon_1.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -l -#SBATCH --nodes=1 --gres=gpu:1 --time=24:00:00 -#SBATCH --job-name=Task504_glacier_mtl_recon_1 - -export data_raw="/home/woody/iwi5/iwi5039h/data_raw" -export nnUNet_raw_data_base="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_raw_data_base/" -export nnUNet_preprocessed="/home/woody/iwi5/iwi5039h/nnUNet_data/nnUNet_preprocessed/" -export RESULTS_FOLDER="/home/woody/iwi5/iwi5039h/nnUNet_data/RESULTS_FOLDER" - -cd nnunet_glacer -pwd -conda activate nnunet - -python3 nnunet/dataset_conversion/Task504_Glacier_mtl_recon.py -data_percentage 100 -base $data_raw -python3 nnunet/experiment_planning/nnUNet_plan_and_preprocess.py -t 504 -pl3d None -pl2d ExperimentPlanner2D_mtl - -python3 nnunet/run/run_training.py 2d nnUNetTrainerMTLrecon 504 1 -p nnUNetPlans_mtl --disable_postprocessing_on_folds -python3 nnunet/inference/predict_simple.py -i $nnUNet_raw_data_base/nnUNet_raw_data/Task504_Glacier_mtl_recon/imagesTs -o $RESULTS_FOLDER/test_predictions/Task504_Glacier_mtl_recon/fold_1 -t 504 -m 2d -f 1 -p nnUNetPlans_mtl -tr nnUNetTrainerMTLrecon -python3 nnunet/dataset_conversion/Task504_Glacier_mtl_recon_reverse.py -i $RESULTS_FOLDER/test_predictions/Task504_Glacier_mtl_recon/fold_1 -python3 ./evaluate_nnUNet.py --predictions $RESULTS_FOLDER/test_predictions/Task504_Glacier_mtl_recon/fold_1/pngs --labels_fronts $data_raw/fronts/test --labels_zones $data_raw/zones/test --sar_images $data_raw/sar_images/test diff --git a/spaces/huggingface-projects/color-palette-generator-sd/frontend/svelte.config.js b/spaces/huggingface-projects/color-palette-generator-sd/frontend/svelte.config.js deleted file mode 100644 index f69ce2e1166832bdc6a4525e51f51b10c4f4abf1..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/color-palette-generator-sd/frontend/svelte.config.js +++ /dev/null @@ -1,26 +0,0 @@ -import adapter from '@sveltejs/adapter-static'; -import preprocess from 'svelte-preprocess'; - -const dev = process.env.NODE_ENV === 'development'; -console.log(dev) -/** @type {import('@sveltejs/kit').Config} */ -const config = { - preprocess: [ - preprocess({ - postcss: true - }) - ], - kit: { - paths: { - base: '/static' - }, - adapter: adapter({ - pages: 'build', - assets: 'build', - fallback: null, - precompress: false - }) - } -}; - -export default config; diff --git a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc02_16gpus_r50_bs8k.py b/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc02_16gpus_r50_bs8k.py deleted file mode 100644 index c02bdf3afe8370086cf64fd112244b00cee35a6f..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc02_16gpus_r50_bs8k.py +++ /dev/null @@ -1,27 +0,0 @@ -from easydict import EasyDict as edict - -# make 
training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.margin_list = (1.0, 0.0, 0.4) -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 0.2 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 512 -config.lr = 0.6 -config.verbose = 10000 -config.dali = False - -config.rec = "/train_tmp/WebFace42M" -config.num_classes = 2059906 -config.num_image = 42474557 -config.num_epoch = 20 -config.warmup_epoch = 4 -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/innnky/nyaru-svc2.0/modules.py b/spaces/innnky/nyaru-svc2.0/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/innnky/nyaru-svc2.0/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Midi Files Optimizer 7 Dongle __FULL__ Crack 35.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Midi Files Optimizer 7 Dongle __FULL__ Crack 35.md deleted file mode 100644 index e2d8e84e92a2830f2cd3f796b8dd52203aa5625c..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Midi Files Optimizer 7 Dongle __FULL__ Crack 35.md +++ /dev/null @@ -1,106 +0,0 @@ - -

      Midi Files Optimizer 7 Dongle Crack: A Complete Guide

      - -

If you are a music producer or a MIDI enthusiast, you might have heard of Midi Files Optimizer 7, a powerful piece of software that allows you to edit, optimize, and play MIDI files on your computer. Midi Files Optimizer 7 has many features that can enhance your MIDI experience, such as a key editor, a velocity editor, a drum editor, transposition, tempo changes, volume changes, and more. However, there is one problem: Midi Files Optimizer 7 requires a dongle to work.

      - -

      A dongle is a small device that plugs into your computer's USB port and acts as a security key for the software. Without the dongle, Midi Files Optimizer 7 will not run. This can be frustrating for many users who want to use the software on different computers or who have lost or damaged their dongle. That's why some people look for ways to crack the dongle and bypass the security check.

      -

      Midi Files Optimizer 7 Dongle Crack 35


Download Zip: https://urlin.us/2uEw6X



      - -

      In this article, we will show you how to find and use Midi Files Optimizer 7 Dongle Crack, a tool that can emulate the dongle and make Midi Files Optimizer 7 work without it. We will also explain the risks and benefits of using Midi Files Optimizer 7 Dongle Crack, and how to optimize your MIDI files with Midi Files Optimizer 7. Let's get started!

      - -

      How to Find and Use Midi Files Optimizer 7 Dongle Crack

      - -

      Midi Files Optimizer 7 Dongle Crack is not an official product of Midiland, the company that develops Midi Files Optimizer 7. It is a third-party program that was created by hackers who managed to reverse-engineer the dongle and create a virtual version of it. Therefore, Midi Files Optimizer 7 Dongle Crack is not available on Midiland's website or any other legitimate source. You have to search for it on the internet, using keywords like "Midi Files Optimizer 7 Dongle Crack" or "Midi Files Optimizer 7 Dongle Emulator".

      - -

      However, be careful when downloading Midi Files Optimizer 7 Dongle Crack from unknown sources. Some websites may contain malware, viruses, or fake links that can harm your computer or steal your personal information. Always scan the files with an antivirus program before opening them, and avoid clicking on suspicious ads or pop-ups.

      - -

      Once you have downloaded Midi Files Optimizer 7 Dongle Crack, you need to install it on your computer. The installation process may vary depending on the version of Midi Files Optimizer 7 Dongle Crack you have, but usually it involves copying some files to the folder where Midi Files Optimizer 7 is installed, and running a patch or a keygen program. You may also need to disable your antivirus program temporarily, as some antivirus programs may detect Midi Files Optimizer 7 Dongle Crack as a threat and block it.

      - -

      After installing Midi Files Optimizer 7 Dongle Crack, you can launch Midi Files Optimizer 7 without plugging in the dongle. The software should recognize the virtual dongle and allow you to use all its features. However, keep in mind that Midi Files Optimizer 7 Dongle Crack may not work with all versions of Midi Files Optimizer 7, and it may cause some errors or glitches in the software. Also, using Midi Files Optimizer 7 Dongle Crack may violate Midiland's terms of service and copyright laws.

      - -

      The Risks and Benefits of Using Midi Files Optimizer 7 Dongle Crack

      - -

      Using Midi Files Optimizer 7 Dongle Crack has some advantages and disadvantages that you should consider before deciding whether to use it or not. Here are some of them:

      - -
        -
      • Benefits: -
          -
        • You can use Midi Files Optimizer 7 on any computer without needing the dongle.
        • -
        • You can save money by not buying a new dongle if you have lost or damaged yours.
        • -
        • You can access all the features of Midi Files Optimizer 7 without any limitations.
        • -
        -
      • -
      • Risks: -
          -
        • You may download malware or viruses along with Midi Files Optimizer 7 Dongle Crack.
        • -
        • You may experience errors or bugs in Midi Files Optimizer 7 due to the crack.
        • -
        • You may lose support and updates from Midiland if they detect that you are using a cracked version of their software.
        • -
        • You may face legal consequences if Midiland decides to take action against you for violating their intellectual property rights.
        • -
        -
      • -
      - -

      Ultimately, the choice is yours whether to use Midi Files Optimizer 7 Dongle Crack or not. However, we recommend that you respect Midiland's work and buy a legitimate copy of their software with a dongle. This way, you can enjoy Midi Files Optimizer 7 without any risks or ethical issues.

      - -

      How to Optimize Your MIDI Files with Midi Files Optimizer 7

      - -

Whether you use Midi Files Optimizer 7 with or without a dongle crack, you can use it to optimize your MIDI files and make them sound better. Here are some tips on how to do that (a short code sketch after this list illustrates the same edits at the MIDI data level):

      -

      - -
        -
      • Edit the key and velocity: You can use the key editor and the velocity editor to change the pitch and volume of each note in your MIDI file. This can help you correct any mistakes or adjust the expression of your music.
      • -
      • Edit the drum tracks: You can use the drum editor to change the drum sounds and patterns in your MIDI file. You can choose from different drum kits and styles, or create your own custom drum tracks.
      • -
      • Transpose and change tempo: You can use the transpose and tempo functions to change the key and speed of your MIDI file. This can help you adapt your music to different instruments or singers.
      • -
      • Change volume and balance: You can use the volume and balance sliders to adjust the overall loudness and stereo position of each track in your MIDI file. This can help you create a balanced mix of your music.
      • -
      • Add effects: You can use the effects menu to add reverb, chorus, delay, flanger, phaser, distortion, and other effects to your MIDI file. This can help you enhance the sound quality and atmosphere of your music.
      • -
      - -
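The tips above describe edits made in the Midi Files Optimizer 7 GUI. As a rough illustration of what transposition, velocity scaling, and a tempo change mean at the MIDI data level, here is a minimal sketch; it is not part of Midi Files Optimizer 7, and the third-party `mido` library, the file names, and the +2 semitone / 100 BPM values are assumptions chosen for the example.

```python
# Minimal sketch using the third-party `mido` library (pip install mido).
# It transposes every note up two semitones, scales velocities by 1.2,
# and rewrites tempo events to 100 BPM. File names are placeholders.
import mido

mid = mido.MidiFile("song.mid")
out = mido.MidiFile(ticks_per_beat=mid.ticks_per_beat)

for track in mid.tracks:
    new_track = mido.MidiTrack()
    for msg in track:
        if msg.type in ("note_on", "note_off"):
            # Transpose by +2 semitones and scale velocity, clamped to the 0-127 MIDI range.
            msg = msg.copy(
                note=min(127, max(0, msg.note + 2)),
                velocity=min(127, int(msg.velocity * 1.2)),
            )
        elif msg.type == "set_tempo":
            # Tempo is stored as microseconds per beat; bpm2tempo converts from BPM.
            msg = msg.copy(tempo=mido.bpm2tempo(100))
        new_track.append(msg)
    out.tracks.append(new_track)

out.save("song_edited.mid")
```

Inside Midi Files Optimizer 7 these operations are done through the editors listed above; the sketch only shows what the underlying note and tempo events look like when they are changed.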

      Midi Files Optimizer 7 also has other features that can help you optimize your MIDI files, such as MIDI player, MIDI recorder, MIDI converter, MIDI merger, MIDI splitter, MIDI optimizer wizard, and more. You can explore these features by reading the user manual or watching some tutorials online.

      - -

      Conclusion

      - -

Midi Files Optimizer 7 is a great piece of software for editing, optimizing, and playing MIDI files on your computer. However, it requires a dongle to work properly. Some people try to crack the dongle and use Midi Files Optimizer 7 without it, but this comes with some risks and drawbacks. We recommend that you buy a legitimate copy of Midi Files Optimizer 7 with a dongle from Midiland's website or authorized dealers. This way, you can support Midiland's development and enjoy their software without any problems.

      - -

      We hope this article has helped you understand how to find and use Midi Files Optimizer 7 Dongle Crack, as well as how to optimize your MIDI files with Midi Files Optimizer 7. If you have any questions or comments about this topic, feel free to leave them below.

      -

      Where to Download Midi Files Optimizer 7 and How to Install It

      - -

      If you want to use Midi Files Optimizer 7, you need to download it from Midiland's website or from one of their authorized dealers. You can choose between the standard version and the pro version, depending on your needs and budget. The standard version costs 99 euros, while the pro version costs 149 euros. Both versions come with a dongle that you need to plug into your computer's USB port.

      - -

      To install Midi Files Optimizer 7, you need to follow these steps:

      - -
        -
      1. Insert the installation CD into your computer's CD drive and run the setup program.
      2. -
      3. Follow the instructions on the screen and choose the destination folder for the software.
      4. -
      5. When prompted, enter the serial number that came with your dongle.
      6. -
      7. Connect the dongle to your computer's USB port and wait for it to be recognized.
      8. -
      9. Finish the installation and launch Midi Files Optimizer 7.
      10. -
      - -

      You can also download Midi Files Optimizer 7 from Midiland's website if you have lost or damaged your installation CD. However, you still need the dongle and the serial number to activate the software.

      - -

      How to Use Midi Files Optimizer 7 to Edit and Play MIDI Files

      - -

Midi Files Optimizer 7 is a user-friendly program that lets you edit and play MIDI files on your computer. You can use it to create your own MIDI files or to modify existing ones. You can also use it to play MIDI files with different sound fonts and synthesizers.

      - -

To use Midi Files Optimizer 7, you need to follow these steps (a short code sketch after this list shows what the underlying tracks and events look like):

      - -
        -
      1. Launch Midi Files Optimizer 7 and open a MIDI file that you want to edit or play.
      2. -
      3. Use the toolbar and the menu bar to access different functions and features of the software.
      4. -
      5. Use the main window to view and edit the tracks, channels, notes, and events of your MIDI file.
      6. -
      7. Use the sub-windows to access the key editor, velocity editor, drum editor, effects, mixer, player, recorder, converter, merger, splitter, optimizer wizard, and other tools.
      8. -
      9. Save your changes and export your MIDI file as a new file or overwrite the original one.
      10. -
      - -
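The steps above refer to the tracks, channels, notes, and events shown in Midi Files Optimizer 7's main window. For readers who want to see that underlying data directly, here is a small sketch that prints the tracks and first few events of a MIDI file; the third-party `mido` library and the file name are assumptions for the example, not part of Midi Files Optimizer 7.

```python
# Minimal sketch that lists the contents of a MIDI file with the `mido` library.
# The file name is a placeholder; any .mid file will do.
import mido

mid = mido.MidiFile("song.mid")
print(f"Type {mid.type}, {len(mid.tracks)} tracks, {mid.ticks_per_beat} ticks per beat")

for i, track in enumerate(mid.tracks):
    print(f"Track {i}: {track.name!r}, {len(track)} events")
    for msg in track[:10]:  # show only the first few events per track
        if msg.is_meta:
            print("  meta:", msg)
        elif msg.type in ("note_on", "note_off"):
            print(f"  ch{msg.channel} {msg.type} note={msg.note} vel={msg.velocity} dt={msg.time}")
        else:
            print("  ", msg)
```

This is roughly the information Midi Files Optimizer 7 presents in its track and event views, just printed as text.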

      You can also use Midi Files Optimizer 7 to open multiple MIDI files at once and switch between them using tabs. You can also drag and drop MIDI files from your computer or from other applications into Midi Files Optimizer 7.

      - -

      Conclusion

      - -

Midi Files Optimizer 7 is a versatile piece of software that can help you edit, optimize, and play MIDI files on your computer. However, it requires a dongle to work properly. Some people try to crack the dongle and use Midi Files Optimizer 7 without it, but this comes with some risks and drawbacks. We recommend that you buy a legitimate copy of Midi Files Optimizer 7 with a dongle from Midiland's website or authorized dealers. This way, you can support Midiland's development and enjoy their software without any problems.

      - -

We hope this article has helped you understand how to download, install, and use Midi Files Optimizer 7, with or without the dongle crack. If you have any questions or comments about this topic, feel free to leave them below.

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/BobCAD-CAM V25 Crack.rar.md b/spaces/inreVtussa/clothingai/Examples/BobCAD-CAM V25 Crack.rar.md deleted file mode 100644 index 249ff31bfc2f4ff1936fbbd2b8b142f41511c411..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/BobCAD-CAM V25 Crack.rar.md +++ /dev/null @@ -1,13 +0,0 @@ -

      BobCAD-CAM V25 Crack.rar


      DOWNLOAD ————— https://tiurll.com/2uCkwX



- -BobCAD-CAM V25 !!INSTALL!! Crack.rar. -December 17, 2016 - BobCAD-CAM for V25 with crack and patch is released! -The latest Tweets from BOBCAD-CAM (@BobCAD_CAM). -[CAM] Software Innovations.
      -
      -
      -

      diff --git a/spaces/iqovocn/ChuanhuChatGPT/modules/overwrites.py b/spaces/iqovocn/ChuanhuChatGPT/modules/overwrites.py deleted file mode 100644 index e029f4a50285c64dcb286a34cb1c3b2680880e05..0000000000000000000000000000000000000000 --- a/spaces/iqovocn/ChuanhuChatGPT/modules/overwrites.py +++ /dev/null @@ -1,93 +0,0 @@ -from __future__ import annotations -import logging - -from typing import List, Tuple -from gradio_client import utils as client_utils -from gradio import utils -import inspect - -from modules.presets import * -from modules.index_func import * - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. - """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | tuple | list | None, role: str - ) -> str | dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - file_uri = chat_message[0] - if utils.validate_url(file_uri): - filepath = file_uri - else: - filepath = self.make_temp_copy_if_needed(file_uri) - - mime_type = client_utils.get_mimetype(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - # chat_message = inspect.cleandoc(chat_message) - # escape html spaces - # chat_message = chat_message.replace(" ", " ") - if role == "bot": - chat_message = convert_bot_before_marked(chat_message) - elif role == "user": - chat_message = convert_user_before_marked(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - -with open("./assets/custom.js", "r", encoding="utf-8") as f, \ - open("./assets/external-scripts.js", "r", encoding="utf-8") as f1: - customJS = f.read() - externalScripts = f1.read() - - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - # if render_latex: - # js += """\""" - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No 
newline at end of file diff --git a/spaces/jackli888/stable-diffusion-webui/test/basic_features/__init__.py b/spaces/jackli888/stable-diffusion-webui/test/basic_features/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/jarvisbot/ChatImprovement/toolbox.py b/spaces/jarvisbot/ChatImprovement/toolbox.py deleted file mode 100644 index d57fee63275186c6eb63d44eef22f3537be1b5cf..0000000000000000000000000000000000000000 --- a/spaces/jarvisbot/ChatImprovement/toolbox.py +++ /dev/null @@ -1,140 +0,0 @@ -import markdown, mdtex2html, threading -from show_math import convert as convert_math -from functools import wraps - -def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]): - """ - 调用简单的predict_no_ui接口,但是依然保留了些许界面心跳功能,当对话太长时,会自动采用二分法截断 - """ - import time - try: from config_private import TIMEOUT_SECONDS, MAX_RETRY - except: from config import TIMEOUT_SECONDS, MAX_RETRY - from predict import predict_no_ui - mutable = [None, ''] - def mt(i_say, history): - while True: - try: - mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history) - break - except ConnectionAbortedError as e: - if len(history) > 0: - history = [his[len(his)//2:] for his in history if his is not None] - mutable[1] = 'Warning! History conversation is too long, cut into half. ' - else: - i_say = i_say[:len(i_say)//2] - mutable[1] = 'Warning! Input file is too long, cut into half. ' - except TimeoutError as e: - mutable[0] = '[Local Message] Failed with timeout' - - thread_name = threading.Thread(target=mt, args=(i_say, history)); thread_name.start() - cnt = 0 - while thread_name.is_alive(): - cnt += 1 - chatbot[-1] = (i_say_show_user, f"[Local Message] {mutable[1]}waiting gpt response {cnt}/{TIMEOUT_SECONDS*2*(MAX_RETRY+1)}"+''.join(['.']*(cnt%4))) - yield chatbot, history, '正常' - time.sleep(1) - gpt_say = mutable[0] - return gpt_say - -def write_results_to_file(history, file_name=None): - """ - 将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。 - """ - import os, time - if file_name is None: - file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md' - os.makedirs('./gpt_log/', exist_ok=True) - with open(f'./gpt_log/{file_name}', 'w') as f: - f.write('# chatGPT 分析报告\n') - for i, content in enumerate(history): - if i%2==0: f.write('## ') - f.write(content) - f.write('\n\n') - res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}') - print(res) - return res - -def regular_txt_to_markdown(text): - """ - 将普通文本转换为Markdown格式的文本。 - """ - text = text.replace('\n', '\n\n') - text = text.replace('\n\n\n', '\n\n') - text = text.replace('\n\n\n', '\n\n') - return text - -def CatchException(f): - """ - 装饰器函数,捕捉函数f中的异常并封装到一个生成器中返回,并显示到聊天当中。 - """ - @wraps(f) - def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - try: - yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT) - except Exception as e: - import traceback - from check_proxy import check_proxy - try: from config_private import proxies - except: from config import proxies - tb_str = regular_txt_to_markdown(traceback.format_exc()) - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 实验性函数调用出错: \n\n {tb_str} \n\n 当前代理可用性: \n\n {check_proxy(proxies)}") - yield chatbot, history, f'异常 {e}' - return decorated - -def report_execption(chatbot, history, a, b): - """ - 向chatbot中添加错误信息 - """ - chatbot.append((a, b)) - 
history.append(a); history.append(b) - -def text_divide_paragraph(text): - """ - 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 - """ - if '```' in text: - # careful input - return text - else: - # wtf input - lines = text.split("\n") - for i, line in enumerate(lines): - if i!=0: lines[i] = "

      "+lines[i].replace(" ", " ")+"

      " - text = "".join(lines) - return text - -def markdown_convertion(txt): - """ - 将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。 - """ - if ('$' in txt) and ('```' not in txt): - return markdown.markdown(txt,extensions=['fenced_code','tables']) + '

      ' + \ - markdown.markdown(convert_math(txt, splitParagraphs=False),extensions=['fenced_code','tables']) - else: - return markdown.markdown(txt,extensions=['fenced_code','tables']) - - -def format_io(self, y): - """ - 将输入和输出解析为HTML格式。将y中最后一项的输入部分段落化,并将输出部分的Markdown和数学公式转换为HTML格式。 - """ - if y is None: return [] - i_ask, gpt_reply = y[-1] - i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波 - y[-1] = ( - None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code','tables']), - None if gpt_reply is None else markdown_convertion(gpt_reply) - ) - return y - - -def find_free_port(): - """ - 返回当前系统中可用的未使用端口。 - """ - import socket - from contextlib import closing - with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: - s.bind(('', 0)) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - return s.getsockname()[1] \ No newline at end of file diff --git a/spaces/jbilcke-hf/LifeSim/src/components/ui/dropdown-menu.tsx b/spaces/jbilcke-hf/LifeSim/src/components/ui/dropdown-menu.tsx deleted file mode 100644 index 5803489a1d197a9db5018e413e63abe84b2efb8e..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/LifeSim/src/components/ui/dropdown-menu.tsx +++ /dev/null @@ -1,200 +0,0 @@ -"use client" - -import * as React from "react" -import * as DropdownMenuPrimitive from "@radix-ui/react-dropdown-menu" -import { Check, ChevronRight, Circle } from "lucide-react" - -import { cn } from "@/lib/utils" - -const DropdownMenu = DropdownMenuPrimitive.Root - -const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger - -const DropdownMenuGroup = DropdownMenuPrimitive.Group - -const DropdownMenuPortal = DropdownMenuPrimitive.Portal - -const DropdownMenuSub = DropdownMenuPrimitive.Sub - -const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup - -const DropdownMenuSubTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, children, ...props }, ref) => ( - - {children} - - -)) -DropdownMenuSubTrigger.displayName = - DropdownMenuPrimitive.SubTrigger.displayName - -const DropdownMenuSubContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DropdownMenuSubContent.displayName = - DropdownMenuPrimitive.SubContent.displayName - -const DropdownMenuContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - - - -)) -DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName - -const DropdownMenuItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - -)) -DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName - -const DropdownMenuCheckboxItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, checked, ...props }, ref) => ( - - - - - - - {children} - -)) -DropdownMenuCheckboxItem.displayName = - DropdownMenuPrimitive.CheckboxItem.displayName - -const DropdownMenuRadioItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -DropdownMenuRadioItem.displayName = DropdownMenuPrimitive.RadioItem.displayName - -const DropdownMenuLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( 
- -)) -DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName - -const DropdownMenuSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName - -const DropdownMenuShortcut = ({ - className, - ...props -}: React.HTMLAttributes) => { - return ( - - ) -} -DropdownMenuShortcut.displayName = "DropdownMenuShortcut" - -export { - DropdownMenu, - DropdownMenuTrigger, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuCheckboxItem, - DropdownMenuRadioItem, - DropdownMenuLabel, - DropdownMenuSeparator, - DropdownMenuShortcut, - DropdownMenuGroup, - DropdownMenuPortal, - DropdownMenuSub, - DropdownMenuSubContent, - DropdownMenuSubTrigger, - DropdownMenuRadioGroup, -} diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/app/layout.tsx b/spaces/jbilcke-hf/ai-clip-factory/src/app/layout.tsx deleted file mode 100644 index c5e5534ff7abdcb51cabf58635f14cb6975af178..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/app/layout.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { cn } from '@/lib/utils' -import './globals.css' -import type { Metadata } from 'next' -import { Inter } from 'next/font/google' - -const inter = Inter({ subsets: ['latin'] }) - -export const metadata: Metadata = { - // alternative names: - // giffer - // gipher - // GIF Factory - // GIF Magic - // AI GIF Genie - title: 'AI Clip Factory 🧞', - description: 'AI Clip Factory 🧞', -} - -export default function RootLayout({ - children, -}: { - children: React.ReactNode -}) { - return ( - - - {children} - - - ) -} diff --git a/spaces/jjourney1125/swin2sr/utils/util_calculate_psnr_ssim.py b/spaces/jjourney1125/swin2sr/utils/util_calculate_psnr_ssim.py deleted file mode 100644 index a8b5a0aeafec81cb3bb8a815ff739fc415ea03cb..0000000000000000000000000000000000000000 --- a/spaces/jjourney1125/swin2sr/utils/util_calculate_psnr_ssim.py +++ /dev/null @@ -1,320 +0,0 @@ -# ----------------------------------------------------------------------------------- -# https://github.com/JingyunLiang/SwinIR/blob/main/utils/util_calculate_psnr_ssim.py -# ----------------------------------------------------------------------------------- - -import cv2 -import torch -import numpy as np - -def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False): - """Calculate PSNR (Peak Signal-to-Noise Ratio). - Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio - Args: - img1 (ndarray): Images with range [0, 255]. - img2 (ndarray): Images with range [0, 255]. - crop_border (int): Cropped pixels in each edge of an image. These - pixels are not involved in the PSNR calculation. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - Default: 'HWC'. - test_y_channel (bool): Test on Y channel of YCbCr. Default: False. - Returns: - float: psnr result. - """ - - assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.') - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') - img1 = reorder_image(img1, input_order=input_order) - img2 = reorder_image(img2, input_order=input_order) - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - - if crop_border != 0: - img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] - img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] 
- - if test_y_channel: - img1 = to_y_channel(img1) - img2 = to_y_channel(img2) - - mse = np.mean((img1 - img2) ** 2) - if mse == 0: - return float('inf') - return 20. * np.log10(255. / np.sqrt(mse)) - - -def _ssim(img1, img2): - """Calculate SSIM (structural similarity) for one channel images. - It is called by func:`calculate_ssim`. - Args: - img1 (ndarray): Images with range [0, 255] with order 'HWC'. - img2 (ndarray): Images with range [0, 255] with order 'HWC'. - Returns: - float: ssim result. - """ - - C1 = (0.01 * 255) ** 2 - C2 = (0.03 * 255) ** 2 - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - kernel = cv2.getGaussianKernel(11, 1.5) - window = np.outer(kernel, kernel.transpose()) - - mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] - mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] - mu1_sq = mu1 ** 2 - mu2_sq = mu2 ** 2 - mu1_mu2 = mu1 * mu2 - sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq - sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq - sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) - return ssim_map.mean() - - -def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False): - """Calculate SSIM (structural similarity). - Ref: - Image quality assessment: From error visibility to structural similarity - The results are the same as that of the official released MATLAB code in - https://ece.uwaterloo.ca/~z70wang/research/ssim/. - For three-channel images, SSIM is calculated for each channel and then - averaged. - Args: - img1 (ndarray): Images with range [0, 255]. - img2 (ndarray): Images with range [0, 255]. - crop_border (int): Cropped pixels in each edge of an image. These - pixels are not involved in the SSIM calculation. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - Default: 'HWC'. - test_y_channel (bool): Test on Y channel of YCbCr. Default: False. - Returns: - float: ssim result. - """ - - assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.') - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') - img1 = reorder_image(img1, input_order=input_order) - img2 = reorder_image(img2, input_order=input_order) - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - - if crop_border != 0: - img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] - img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] 
- - if test_y_channel: - img1 = to_y_channel(img1) - img2 = to_y_channel(img2) - - ssims = [] - for i in range(img1.shape[2]): - ssims.append(_ssim(img1[..., i], img2[..., i])) - return np.array(ssims).mean() - - -def _blocking_effect_factor(im): - block_size = 8 - - block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8) - block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8) - - horizontal_block_difference = ( - (im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum( - 3).sum(2).sum(1) - vertical_block_difference = ( - (im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum( - 2).sum(1) - - nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions) - nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions) - - horizontal_nonblock_difference = ( - (im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum( - 3).sum(2).sum(1) - vertical_nonblock_difference = ( - (im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum( - 3).sum(2).sum(1) - - n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1) - n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1) - boundary_difference = (horizontal_block_difference + vertical_block_difference) / ( - n_boundary_horiz + n_boundary_vert) - - n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz - n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert - nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / ( - n_nonboundary_horiz + n_nonboundary_vert) - - scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]])) - bef = scaler * (boundary_difference - nonboundary_difference) - - bef[boundary_difference <= nonboundary_difference] = 0 - return bef - - -def calculate_psnrb(img1, img2, crop_border, input_order='HWC', test_y_channel=False): - """Calculate PSNR-B (Peak Signal-to-Noise Ratio). - Ref: Quality assessment of deblocked images, for JPEG image deblocking evaluation - # https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py - Args: - img1 (ndarray): Images with range [0, 255]. - img2 (ndarray): Images with range [0, 255]. - crop_border (int): Cropped pixels in each edge of an image. These - pixels are not involved in the PSNR calculation. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - Default: 'HWC'. - test_y_channel (bool): Test on Y channel of YCbCr. Default: False. - Returns: - float: psnr result. - """ - - assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.') - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') - img1 = reorder_image(img1, input_order=input_order) - img2 = reorder_image(img2, input_order=input_order) - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - - if crop_border != 0: - img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] - img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] - - if test_y_channel: - img1 = to_y_channel(img1) - img2 = to_y_channel(img2) - - # follow https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py - img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255. 
- img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255. - - total = 0 - for c in range(img1.shape[1]): - mse = torch.nn.functional.mse_loss(img1[:, c:c + 1, :, :], img2[:, c:c + 1, :, :], reduction='none') - bef = _blocking_effect_factor(img1[:, c:c + 1, :, :]) - - mse = mse.view(mse.shape[0], -1).mean(1) - total += 10 * torch.log10(1 / (mse + bef)) - - return float(total) / img1.shape[1] - - -def reorder_image(img, input_order='HWC'): - """Reorder images to 'HWC' order. - If the input_order is (h, w), return (h, w, 1); - If the input_order is (c, h, w), return (h, w, c); - If the input_order is (h, w, c), return as it is. - Args: - img (ndarray): Input image. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - If the input image shape is (h, w), input_order will not have - effects. Default: 'HWC'. - Returns: - ndarray: reordered image. - """ - - if input_order not in ['HWC', 'CHW']: - raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'") - if len(img.shape) == 2: - img = img[..., None] - if input_order == 'CHW': - img = img.transpose(1, 2, 0) - return img - - -def to_y_channel(img): - """Change to Y channel of YCbCr. - Args: - img (ndarray): Images with range [0, 255]. - Returns: - (ndarray): Images with range [0, 255] (float type) without round. - """ - img = img.astype(np.float32) / 255. - if img.ndim == 3 and img.shape[2] == 3: - img = bgr2ycbcr(img, y_only=True) - img = img[..., None] - return img * 255. - - -def _convert_input_type_range(img): - """Convert the type and range of the input image. - It converts the input image to np.float32 type and range of [0, 1]. - It is mainly used for pre-processing the input image in colorspace - convertion functions such as rgb2ycbcr and ycbcr2rgb. - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - Returns: - (ndarray): The converted image with type of np.float32 and range of - [0, 1]. - """ - img_type = img.dtype - img = img.astype(np.float32) - if img_type == np.float32: - pass - elif img_type == np.uint8: - img /= 255. - else: - raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}') - return img - - -def _convert_output_type_range(img, dst_type): - """Convert the type and range of the image according to dst_type. - It converts the image to desired type and range. If `dst_type` is np.uint8, - images will be converted to np.uint8 type with range [0, 255]. If - `dst_type` is np.float32, it converts the image to np.float32 type with - range [0, 1]. - It is mainly used for post-processing images in colorspace convertion - functions such as rgb2ycbcr and ycbcr2rgb. - Args: - img (ndarray): The image to be converted with np.float32 type and - range [0, 255]. - dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it - converts the image to np.uint8 type with range [0, 255]. If - dst_type is np.float32, it converts the image to np.float32 type - with range [0, 1]. - Returns: - (ndarray): The converted image with desired type and range. - """ - if dst_type not in (np.uint8, np.float32): - raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}') - if dst_type == np.uint8: - img = img.round() - else: - img /= 255. - return img.astype(dst_type) - - -def bgr2ycbcr(img, y_only=False): - """Convert a BGR image to YCbCr image. - The bgr version of rgb2ycbcr. 
- It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - y_only (bool): Whether to only return Y channel. Default: False. - Returns: - ndarray: The converted YCbCr image. The output image has the same type - and range as input image. - """ - img_type = img.dtype - img = _convert_input_type_range(img) - if y_only: - out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 - else: - out_img = np.matmul( - img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128] - out_img = _convert_output_type_range(out_img, img_type) - return out_img \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/PKCS1_OAEP.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/PKCS1_OAEP.py deleted file mode 100644 index 57a982b85041637c25576801edab42743aff719c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/PKCS1_OAEP.py +++ /dev/null @@ -1,239 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Cipher/PKCS1_OAEP.py : PKCS#1 OAEP -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -from Crypto.Signature.pss import MGF1 -import Crypto.Hash.SHA1 - -from Crypto.Util.py3compat import bord, _copy_bytes -import Crypto.Util.number -from Crypto.Util.number import ceil_div, bytes_to_long, long_to_bytes -from Crypto.Util.strxor import strxor -from Crypto import Random - -class PKCS1OAEP_Cipher: - """Cipher object for PKCS#1 v1.5 OAEP. - Do not create directly: use :func:`new` instead.""" - - def __init__(self, key, hashAlgo, mgfunc, label, randfunc): - """Initialize this PKCS#1 OAEP cipher object. - - :Parameters: - key : an RSA key object - If a private half is given, both encryption and decryption are possible. - If a public half is given, only encryption is possible. - hashAlgo : hash object - The hash function to use. This can be a module under `Crypto.Hash` - or an existing hash object created from any of such modules. If not specified, - `Crypto.Hash.SHA1` is used. 
- mgfunc : callable - A mask generation function that accepts two parameters: a string to - use as seed, and the lenth of the mask to generate, in bytes. - If not specified, the standard MGF1 consistent with ``hashAlgo`` is used (a safe choice). - label : bytes/bytearray/memoryview - A label to apply to this particular encryption. If not specified, - an empty string is used. Specifying a label does not improve - security. - randfunc : callable - A function that returns random bytes. - - :attention: Modify the mask generation function only if you know what you are doing. - Sender and receiver must use the same one. - """ - self._key = key - - if hashAlgo: - self._hashObj = hashAlgo - else: - self._hashObj = Crypto.Hash.SHA1 - - if mgfunc: - self._mgf = mgfunc - else: - self._mgf = lambda x,y: MGF1(x,y,self._hashObj) - - self._label = _copy_bytes(None, None, label) - self._randfunc = randfunc - - def can_encrypt(self): - """Legacy function to check if you can call :meth:`encrypt`. - - .. deprecated:: 3.0""" - return self._key.can_encrypt() - - def can_decrypt(self): - """Legacy function to check if you can call :meth:`decrypt`. - - .. deprecated:: 3.0""" - return self._key.can_decrypt() - - def encrypt(self, message): - """Encrypt a message with PKCS#1 OAEP. - - :param message: - The message to encrypt, also known as plaintext. It can be of - variable length, but not longer than the RSA modulus (in bytes) - minus 2, minus twice the hash output size. - For instance, if you use RSA 2048 and SHA-256, the longest message - you can encrypt is 190 byte long. - :type message: bytes/bytearray/memoryview - - :returns: The ciphertext, as large as the RSA modulus. - :rtype: bytes - - :raises ValueError: - if the message is too long. - """ - - # See 7.1.1 in RFC3447 - modBits = Crypto.Util.number.size(self._key.n) - k = ceil_div(modBits, 8) # Convert from bits to bytes - hLen = self._hashObj.digest_size - mLen = len(message) - - # Step 1b - ps_len = k - mLen - 2 * hLen - 2 - if ps_len < 0: - raise ValueError("Plaintext is too long.") - # Step 2a - lHash = self._hashObj.new(self._label).digest() - # Step 2b - ps = b'\x00' * ps_len - # Step 2c - db = lHash + ps + b'\x01' + _copy_bytes(None, None, message) - # Step 2d - ros = self._randfunc(hLen) - # Step 2e - dbMask = self._mgf(ros, k-hLen-1) - # Step 2f - maskedDB = strxor(db, dbMask) - # Step 2g - seedMask = self._mgf(maskedDB, hLen) - # Step 2h - maskedSeed = strxor(ros, seedMask) - # Step 2i - em = b'\x00' + maskedSeed + maskedDB - # Step 3a (OS2IP) - em_int = bytes_to_long(em) - # Step 3b (RSAEP) - m_int = self._key._encrypt(em_int) - # Step 3c (I2OSP) - c = long_to_bytes(m_int, k) - return c - - def decrypt(self, ciphertext): - """Decrypt a message with PKCS#1 OAEP. - - :param ciphertext: The encrypted message. - :type ciphertext: bytes/bytearray/memoryview - - :returns: The original message (plaintext). - :rtype: bytes - - :raises ValueError: - if the ciphertext has the wrong length, or if decryption - fails the integrity check (in which case, the decryption - key is probably wrong). - :raises TypeError: - if the RSA key has no private half (i.e. you are trying - to decrypt using a public key). - """ - - # See 7.1.2 in RFC3447 - modBits = Crypto.Util.number.size(self._key.n) - k = ceil_div(modBits,8) # Convert from bits to bytes - hLen = self._hashObj.digest_size - - # Step 1b and 1c - if len(ciphertext) != k or k -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -"""Self-test for Math.Numbers""" - -import sys -import unittest - -from Crypto.SelfTest.st_common import list_test_cases - -from Crypto.Util.py3compat import * - -from Crypto.Math._IntegerNative import IntegerNative - - -class TestIntegerBase(unittest.TestCase): - - def setUp(self): - raise NotImplementedError("To be implemented") - - def Integers(self, *arg): - return map(self.Integer, arg) - - def test_init_and_equality(self): - Integer = self.Integer - - v1 = Integer(23) - v2 = Integer(v1) - v3 = Integer(-9) - self.assertRaises(ValueError, Integer, 1.0) - - v4 = Integer(10**10) - v5 = Integer(-10**10) - - v6 = Integer(0xFFFF) - v7 = Integer(0xFFFFFFFF) - v8 = Integer(0xFFFFFFFFFFFFFFFF) - - self.assertEqual(v1, v1) - self.assertEqual(v1, 23) - self.assertEqual(v1, v2) - self.assertEqual(v3, -9) - self.assertEqual(v4, 10 ** 10) - self.assertEqual(v5, -10 ** 10) - self.assertEqual(v6, 0xFFFF) - self.assertEqual(v7, 0xFFFFFFFF) - self.assertEqual(v8, 0xFFFFFFFFFFFFFFFF) - - self.assertFalse(v1 == v4) - - # Init and comparison between Integer's - v6 = Integer(v1) - self.assertEqual(v1, v6) - - self.assertFalse(Integer(0) == None) - - def test_conversion_to_int(self): - v1, v2 = self.Integers(-23, 2 ** 1000) - self.assertEqual(int(v1), -23) - self.assertEqual(int(v2), 2 ** 1000) - - def test_equality_with_ints(self): - v1, v2, v3 = self.Integers(23, -89, 2 ** 1000) - self.assertTrue(v1 == 23) - self.assertTrue(v2 == -89) - self.assertFalse(v1 == 24) - self.assertTrue(v3 == 2 ** 1000) - - def test_conversion_to_str(self): - v1, v2, v3, v4 = self.Integers(20, 0, -20, 2 ** 1000) - self.assertTrue(str(v1) == "20") - self.assertTrue(str(v2) == "0") - self.assertTrue(str(v3) == "-20") - self.assertTrue(str(v4) == "10715086071862673209484250490600018105614048117055336074437503883703510511249361224931983788156958581275946729175531468251871452856923140435984577574698574803934567774824230985421074605062371141877954182153046474983581941267398767559165543946077062914571196477686542167660429831652624386837205668069376") - - def test_repr(self): - v1, v2 = self.Integers(-1, 2**80) - self.assertEqual(repr(v1), "Integer(-1)") - self.assertEqual(repr(v2), 
"Integer(1208925819614629174706176)") - - def test_conversion_to_bytes(self): - Integer = self.Integer - - v1 = Integer(0x17) - self.assertEqual(b("\x17"), v1.to_bytes()) - - v2 = Integer(0xFFFE) - self.assertEqual(b("\xFF\xFE"), v2.to_bytes()) - self.assertEqual(b("\x00\xFF\xFE"), v2.to_bytes(3)) - self.assertRaises(ValueError, v2.to_bytes, 1) - - self.assertEqual(b("\xFE\xFF"), v2.to_bytes(byteorder='little')) - self.assertEqual(b("\xFE\xFF\x00"), v2.to_bytes(3, byteorder='little')) - - v3 = Integer(-90) - self.assertRaises(ValueError, v3.to_bytes) - self.assertRaises(ValueError, v3.to_bytes, byteorder='bittle') - - def test_conversion_from_bytes(self): - Integer = self.Integer - - v1 = Integer.from_bytes(b"\x00") - self.assertTrue(isinstance(v1, Integer)) - self.assertEqual(0, v1) - - v2 = Integer.from_bytes(b"\x00\x01") - self.assertEqual(1, v2) - - v3 = Integer.from_bytes(b"\xFF\xFF") - self.assertEqual(0xFFFF, v3) - - v4 = Integer.from_bytes(b"\x00\x01", 'big') - self.assertEqual(1, v4) - - v5 = Integer.from_bytes(b"\x00\x01", byteorder='big') - self.assertEqual(1, v5) - - v6 = Integer.from_bytes(b"\x00\x01", byteorder='little') - self.assertEqual(0x0100, v6) - - self.assertRaises(ValueError, Integer.from_bytes, b'\x09', 'bittle') - - def test_inequality(self): - # Test Integer!=Integer and Integer!=int - v1, v2, v3, v4 = self.Integers(89, 89, 90, -8) - self.assertTrue(v1 != v3) - self.assertTrue(v1 != 90) - self.assertFalse(v1 != v2) - self.assertFalse(v1 != 89) - self.assertTrue(v1 != v4) - self.assertTrue(v4 != v1) - self.assertTrue(self.Integer(0) != None) - - def test_less_than(self): - # Test IntegerInteger and Integer>int - v1, v2, v3, v4, v5 = self.Integers(13, 13, 14, -8, 2 ** 10) - self.assertTrue(v3 > v1) - self.assertTrue(v3 > 13) - self.assertFalse(v1 > v1) - self.assertFalse(v1 > v2) - self.assertFalse(v1 > 13) - self.assertTrue(v1 > v4) - self.assertFalse(v4 > v1) - self.assertTrue(v5 > v1) - self.assertFalse(v1 > v5) - - def test_more_than_or_equal(self): - # Test Integer>=Integer and Integer>=int - v1, v2, v3, v4 = self.Integers(13, 13, 14, -4) - self.assertTrue(v3 >= v1) - self.assertTrue(v3 >= 13) - self.assertTrue(v1 >= v2) - self.assertTrue(v1 >= v1) - self.assertTrue(v1 >= 13) - self.assertFalse(v4 >= v1) - - def test_bool(self): - v1, v2, v3, v4 = self.Integers(0, 10, -9, 2 ** 10) - self.assertFalse(v1) - self.assertFalse(bool(v1)) - self.assertTrue(v2) - self.assertTrue(bool(v2)) - self.assertTrue(v3) - self.assertTrue(v4) - - def test_is_negative(self): - v1, v2, v3, v4, v5 = self.Integers(-3 ** 100, -3, 0, 3, 3**100) - self.assertTrue(v1.is_negative()) - self.assertTrue(v2.is_negative()) - self.assertFalse(v4.is_negative()) - self.assertFalse(v5.is_negative()) - - def test_addition(self): - # Test Integer+Integer and Integer+int - v1, v2, v3 = self.Integers(7, 90, -7) - self.assertTrue(isinstance(v1 + v2, self.Integer)) - self.assertEqual(v1 + v2, 97) - self.assertEqual(v1 + 90, 97) - self.assertEqual(v1 + v3, 0) - self.assertEqual(v1 + (-7), 0) - self.assertEqual(v1 + 2 ** 10, 2 ** 10 + 7) - - def test_subtraction(self): - # Test Integer-Integer and Integer-int - v1, v2, v3 = self.Integers(7, 90, -7) - self.assertTrue(isinstance(v1 - v2, self.Integer)) - self.assertEqual(v2 - v1, 83) - self.assertEqual(v2 - 7, 83) - self.assertEqual(v2 - v3, 97) - self.assertEqual(v1 - (-7), 14) - self.assertEqual(v1 - 2 ** 10, 7 - 2 ** 10) - - def test_multiplication(self): - # Test Integer-Integer and Integer-int - v1, v2, v3, v4 = self.Integers(4, 5, -2, 2 ** 10) - 
self.assertTrue(isinstance(v1 * v2, self.Integer)) - self.assertEqual(v1 * v2, 20) - self.assertEqual(v1 * 5, 20) - self.assertEqual(v1 * -2, -8) - self.assertEqual(v1 * 2 ** 10, 4 * (2 ** 10)) - - def test_floor_div(self): - v1, v2, v3 = self.Integers(3, 8, 2 ** 80) - self.assertTrue(isinstance(v1 // v2, self.Integer)) - self.assertEqual(v2 // v1, 2) - self.assertEqual(v2 // 3, 2) - self.assertEqual(v2 // -3, -3) - self.assertEqual(v3 // 2 ** 79, 2) - self.assertRaises(ZeroDivisionError, lambda: v1 // 0) - - def test_remainder(self): - # Test Integer%Integer and Integer%int - v1, v2, v3 = self.Integers(23, 5, -4) - self.assertTrue(isinstance(v1 % v2, self.Integer)) - self.assertEqual(v1 % v2, 3) - self.assertEqual(v1 % 5, 3) - self.assertEqual(v3 % 5, 1) - self.assertEqual(v1 % 2 ** 10, 23) - self.assertRaises(ZeroDivisionError, lambda: v1 % 0) - self.assertRaises(ValueError, lambda: v1 % -6) - - def test_simple_exponentiation(self): - v1, v2, v3 = self.Integers(4, 3, -2) - self.assertTrue(isinstance(v1 ** v2, self.Integer)) - self.assertEqual(v1 ** v2, 64) - self.assertEqual(pow(v1, v2), 64) - self.assertEqual(v1 ** 3, 64) - self.assertEqual(pow(v1, 3), 64) - self.assertEqual(v3 ** 2, 4) - self.assertEqual(v3 ** 3, -8) - - self.assertRaises(ValueError, pow, v1, -3) - - def test_modular_exponentiation(self): - v1, v2, v3 = self.Integers(23, 5, 17) - - self.assertTrue(isinstance(pow(v1, v2, v3), self.Integer)) - self.assertEqual(pow(v1, v2, v3), 7) - self.assertEqual(pow(v1, 5, v3), 7) - self.assertEqual(pow(v1, v2, 17), 7) - self.assertEqual(pow(v1, 5, 17), 7) - self.assertEqual(pow(v1, 0, 17), 1) - self.assertEqual(pow(v1, 1, 2 ** 80), 23) - self.assertEqual(pow(v1, 2 ** 80, 89298), 17689) - - self.assertRaises(ZeroDivisionError, pow, v1, 5, 0) - self.assertRaises(ValueError, pow, v1, 5, -4) - self.assertRaises(ValueError, pow, v1, -3, 8) - - def test_inplace_exponentiation(self): - v1 = self.Integer(4) - v1.inplace_pow(2) - self.assertEqual(v1, 16) - - v1 = self.Integer(4) - v1.inplace_pow(2, 15) - self.assertEqual(v1, 1) - - def test_abs(self): - v1, v2, v3, v4, v5 = self.Integers(-2 ** 100, -2, 0, 2, 2 ** 100) - self.assertEqual(abs(v1), 2 ** 100) - self.assertEqual(abs(v2), 2) - self.assertEqual(abs(v3), 0) - self.assertEqual(abs(v4), 2) - self.assertEqual(abs(v5), 2 ** 100) - - def test_sqrt(self): - v1, v2, v3, v4 = self.Integers(-2, 0, 49, 10**100) - - self.assertRaises(ValueError, v1.sqrt) - self.assertEqual(v2.sqrt(), 0) - self.assertEqual(v3.sqrt(), 7) - self.assertEqual(v4.sqrt(), 10**50) - - def test_sqrt_module(self): - - # Invalid modulus (non positive) - self.assertRaises(ValueError, self.Integer(5).sqrt, 0) - self.assertRaises(ValueError, self.Integer(5).sqrt, -1) - - # Simple cases - assert self.Integer(0).sqrt(5) == 0 - assert self.Integer(1).sqrt(5) in (1, 4) - - # Test with all quadratic residues in several fields - for p in (11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53): - for i in range(0, p): - square = i**2 % p - res = self.Integer(square).sqrt(p) - assert res in (i, p - i) - - # 2 is a non-quadratic reside in Z_11 - self.assertRaises(ValueError, self.Integer(2).sqrt, 11) - - # 10 is not a prime - self.assertRaises(ValueError, self.Integer(4).sqrt, 10) - - # 5 is square residue of 4 and 7 - assert self.Integer(5 - 11).sqrt(11) in (4, 7) - assert self.Integer(5 + 11).sqrt(11) in (4, 7) - - def test_in_place_add(self): - v1, v2 = self.Integers(10, 20) - - v1 += v2 - self.assertEqual(v1, 30) - v1 += 10 - self.assertEqual(v1, 40) - v1 += -1 - self.assertEqual(v1, 
39) - v1 += 2 ** 1000 - self.assertEqual(v1, 39 + 2 ** 1000) - - def test_in_place_sub(self): - v1, v2 = self.Integers(10, 20) - - v1 -= v2 - self.assertEqual(v1, -10) - v1 -= -100 - self.assertEqual(v1, 90) - v1 -= 90000 - self.assertEqual(v1, -89910) - v1 -= -100000 - self.assertEqual(v1, 10090) - - def test_in_place_mul(self): - v1, v2 = self.Integers(3, 5) - - v1 *= v2 - self.assertEqual(v1, 15) - v1 *= 2 - self.assertEqual(v1, 30) - v1 *= -2 - self.assertEqual(v1, -60) - v1 *= 2 ** 1000 - self.assertEqual(v1, -60 * (2 ** 1000)) - - def test_in_place_modulus(self): - v1, v2 = self.Integers(20, 7) - - v1 %= v2 - self.assertEqual(v1, 6) - v1 %= 2 ** 1000 - self.assertEqual(v1, 6) - v1 %= 2 - self.assertEqual(v1, 0) - def t(): - v3 = self.Integer(9) - v3 %= 0 - self.assertRaises(ZeroDivisionError, t) - - def test_and(self): - v1, v2, v3 = self.Integers(0xF4, 0x31, -0xF) - self.assertTrue(isinstance(v1 & v2, self.Integer)) - self.assertEqual(v1 & v2, 0x30) - self.assertEqual(v1 & 0x31, 0x30) - self.assertEqual(v1 & v3, 0xF0) - self.assertEqual(v1 & -0xF, 0xF0) - self.assertEqual(v3 & -0xF, -0xF) - self.assertEqual(v2 & (2 ** 1000 + 0x31), 0x31) - - def test_or(self): - v1, v2, v3 = self.Integers(0x40, 0x82, -0xF) - self.assertTrue(isinstance(v1 | v2, self.Integer)) - self.assertEqual(v1 | v2, 0xC2) - self.assertEqual(v1 | 0x82, 0xC2) - self.assertEqual(v2 | v3, -0xD) - self.assertEqual(v2 | 2 ** 1000, 2 ** 1000 + 0x82) - - def test_right_shift(self): - v1, v2, v3 = self.Integers(0x10, 1, -0x10) - self.assertEqual(v1 >> 0, v1) - self.assertTrue(isinstance(v1 >> v2, self.Integer)) - self.assertEqual(v1 >> v2, 0x08) - self.assertEqual(v1 >> 1, 0x08) - self.assertRaises(ValueError, lambda: v1 >> -1) - self.assertEqual(v1 >> (2 ** 1000), 0) - - self.assertEqual(v3 >> 1, -0x08) - self.assertEqual(v3 >> (2 ** 1000), -1) - - def test_in_place_right_shift(self): - v1, v2, v3 = self.Integers(0x10, 1, -0x10) - v1 >>= 0 - self.assertEqual(v1, 0x10) - v1 >>= 1 - self.assertEqual(v1, 0x08) - v1 >>= v2 - self.assertEqual(v1, 0x04) - v3 >>= 1 - self.assertEqual(v3, -0x08) - def l(): - v4 = self.Integer(0x90) - v4 >>= -1 - self.assertRaises(ValueError, l) - def m1(): - v4 = self.Integer(0x90) - v4 >>= 2 ** 1000 - return v4 - self.assertEqual(0, m1()) - def m2(): - v4 = self.Integer(-1) - v4 >>= 2 ** 1000 - return v4 - self.assertEqual(-1, m2()) - - def _test_left_shift(self): - v1, v2, v3 = self.Integers(0x10, 1, -0x10) - self.assertEqual(v1 << 0, v1) - self.assertTrue(isinstance(v1 << v2, self.Integer)) - self.assertEqual(v1 << v2, 0x20) - self.assertEqual(v1 << 1, 0x20) - self.assertEqual(v3 << 1, -0x20) - self.assertRaises(ValueError, lambda: v1 << -1) - self.assertRaises(ValueError, lambda: v1 << (2 ** 1000)) - - def test_in_place_left_shift(self): - v1, v2, v3 = self.Integers(0x10, 1, -0x10) - v1 <<= 0 - self.assertEqual(v1, 0x10) - v1 <<= 1 - self.assertEqual(v1, 0x20) - v1 <<= v2 - self.assertEqual(v1, 0x40) - v3 <<= 1 - self.assertEqual(v3, -0x20) - def l(): - v4 = self.Integer(0x90) - v4 <<= -1 - self.assertRaises(ValueError, l) - def m(): - v4 = self.Integer(0x90) - v4 <<= 2 ** 1000 - self.assertRaises(ValueError, m) - - - def test_get_bit(self): - v1, v2, v3 = self.Integers(0x102, -3, 1) - self.assertEqual(v1.get_bit(0), 0) - self.assertEqual(v1.get_bit(1), 1) - self.assertEqual(v1.get_bit(v3), 1) - self.assertEqual(v1.get_bit(8), 1) - self.assertEqual(v1.get_bit(9), 0) - - self.assertRaises(ValueError, v1.get_bit, -1) - self.assertEqual(v1.get_bit(2 ** 1000), 0) - - 
self.assertRaises(ValueError, v2.get_bit, -1) - self.assertRaises(ValueError, v2.get_bit, 0) - self.assertRaises(ValueError, v2.get_bit, 1) - self.assertRaises(ValueError, v2.get_bit, 2 * 1000) - - def test_odd_even(self): - v1, v2, v3, v4, v5 = self.Integers(0, 4, 17, -4, -17) - - self.assertTrue(v1.is_even()) - self.assertTrue(v2.is_even()) - self.assertFalse(v3.is_even()) - self.assertTrue(v4.is_even()) - self.assertFalse(v5.is_even()) - - self.assertFalse(v1.is_odd()) - self.assertFalse(v2.is_odd()) - self.assertTrue(v3.is_odd()) - self.assertFalse(v4.is_odd()) - self.assertTrue(v5.is_odd()) - - def test_size_in_bits(self): - v1, v2, v3, v4 = self.Integers(0, 1, 0x100, -90) - self.assertEqual(v1.size_in_bits(), 1) - self.assertEqual(v2.size_in_bits(), 1) - self.assertEqual(v3.size_in_bits(), 9) - self.assertRaises(ValueError, v4.size_in_bits) - - def test_size_in_bytes(self): - v1, v2, v3, v4, v5, v6 = self.Integers(0, 1, 0xFF, 0x1FF, 0x10000, -9) - self.assertEqual(v1.size_in_bytes(), 1) - self.assertEqual(v2.size_in_bytes(), 1) - self.assertEqual(v3.size_in_bytes(), 1) - self.assertEqual(v4.size_in_bytes(), 2) - self.assertEqual(v5.size_in_bytes(), 3) - self.assertRaises(ValueError, v6.size_in_bits) - - def test_perfect_square(self): - - self.assertFalse(self.Integer(-9).is_perfect_square()) - self.assertTrue(self.Integer(0).is_perfect_square()) - self.assertTrue(self.Integer(1).is_perfect_square()) - self.assertFalse(self.Integer(2).is_perfect_square()) - self.assertFalse(self.Integer(3).is_perfect_square()) - self.assertTrue(self.Integer(4).is_perfect_square()) - self.assertTrue(self.Integer(39*39).is_perfect_square()) - self.assertFalse(self.Integer(39*39+1).is_perfect_square()) - - for x in range(100, 1000): - self.assertFalse(self.Integer(x**2+1).is_perfect_square()) - self.assertTrue(self.Integer(x**2).is_perfect_square()) - - def test_fail_if_divisible_by(self): - v1, v2, v3 = self.Integers(12, -12, 4) - - # No failure expected - v1.fail_if_divisible_by(7) - v2.fail_if_divisible_by(7) - v2.fail_if_divisible_by(2 ** 80) - - # Failure expected - self.assertRaises(ValueError, v1.fail_if_divisible_by, 4) - self.assertRaises(ValueError, v1.fail_if_divisible_by, v3) - - def test_multiply_accumulate(self): - v1, v2, v3 = self.Integers(4, 3, 2) - v1.multiply_accumulate(v2, v3) - self.assertEqual(v1, 10) - v1.multiply_accumulate(v2, 2) - self.assertEqual(v1, 16) - v1.multiply_accumulate(3, v3) - self.assertEqual(v1, 22) - v1.multiply_accumulate(1, -2) - self.assertEqual(v1, 20) - v1.multiply_accumulate(-2, 1) - self.assertEqual(v1, 18) - v1.multiply_accumulate(1, 2 ** 1000) - self.assertEqual(v1, 18 + 2 ** 1000) - v1.multiply_accumulate(2 ** 1000, 1) - self.assertEqual(v1, 18 + 2 ** 1001) - - def test_set(self): - v1, v2 = self.Integers(3, 6) - v1.set(v2) - self.assertEqual(v1, 6) - v1.set(9) - self.assertEqual(v1, 9) - v1.set(-2) - self.assertEqual(v1, -2) - v1.set(2 ** 1000) - self.assertEqual(v1, 2 ** 1000) - - def test_inverse(self): - v1, v2, v3, v4, v5, v6 = self.Integers(2, 5, -3, 0, 723872, 3433) - - self.assertTrue(isinstance(v1.inverse(v2), self.Integer)) - self.assertEqual(v1.inverse(v2), 3) - self.assertEqual(v1.inverse(5), 3) - self.assertEqual(v3.inverse(5), 3) - self.assertEqual(v5.inverse(92929921), 58610507) - self.assertEqual(v6.inverse(9912), 5353) - - self.assertRaises(ValueError, v2.inverse, 10) - self.assertRaises(ValueError, v1.inverse, -3) - self.assertRaises(ValueError, v4.inverse, 10) - self.assertRaises(ZeroDivisionError, v2.inverse, 0) - - def 
test_inplace_inverse(self): - v1, v2 = self.Integers(2, 5) - - v1.inplace_inverse(v2) - self.assertEqual(v1, 3) - - def test_gcd(self): - v1, v2, v3, v4 = self.Integers(6, 10, 17, -2) - self.assertTrue(isinstance(v1.gcd(v2), self.Integer)) - self.assertEqual(v1.gcd(v2), 2) - self.assertEqual(v1.gcd(10), 2) - self.assertEqual(v1.gcd(v3), 1) - self.assertEqual(v1.gcd(-2), 2) - self.assertEqual(v4.gcd(6), 2) - - def test_lcm(self): - v1, v2, v3, v4, v5 = self.Integers(6, 10, 17, -2, 0) - self.assertTrue(isinstance(v1.lcm(v2), self.Integer)) - self.assertEqual(v1.lcm(v2), 30) - self.assertEqual(v1.lcm(10), 30) - self.assertEqual(v1.lcm(v3), 102) - self.assertEqual(v1.lcm(-2), 6) - self.assertEqual(v4.lcm(6), 6) - self.assertEqual(v1.lcm(0), 0) - self.assertEqual(v5.lcm(0), 0) - - def test_jacobi_symbol(self): - - data = ( - (1001, 1, 1), - (19, 45, 1), - (8, 21, -1), - (5, 21, 1), - (610, 987, -1), - (1001, 9907, -1), - (5, 3439601197, -1) - ) - - js = self.Integer.jacobi_symbol - - # Jacobi symbol is always 1 for k==1 or n==1 - for k in range(1, 30): - self.assertEqual(js(k, 1), 1) - for n in range(1, 30, 2): - self.assertEqual(js(1, n), 1) - - # Fail if n is not positive odd - self.assertRaises(ValueError, js, 6, -2) - self.assertRaises(ValueError, js, 6, -1) - self.assertRaises(ValueError, js, 6, 0) - self.assertRaises(ValueError, js, 0, 0) - self.assertRaises(ValueError, js, 6, 2) - self.assertRaises(ValueError, js, 6, 4) - self.assertRaises(ValueError, js, 6, 6) - self.assertRaises(ValueError, js, 6, 8) - - for tv in data: - self.assertEqual(js(tv[0], tv[1]), tv[2]) - self.assertEqual(js(self.Integer(tv[0]), tv[1]), tv[2]) - self.assertEqual(js(tv[0], self.Integer(tv[1])), tv[2]) - - def test_jacobi_symbol_wikipedia(self): - - # Test vectors from https://en.wikipedia.org/wiki/Jacobi_symbol - tv = [ - (3, [(1, 1), (2, -1), (3, 0), (4, 1), (5, -1), (6, 0), (7, 1), (8, -1), (9, 0), (10, 1), (11, -1), (12, 0), (13, 1), (14, -1), (15, 0), (16, 1), (17, -1), (18, 0), (19, 1), (20, -1), (21, 0), (22, 1), (23, -1), (24, 0), (25, 1), (26, -1), (27, 0), (28, 1), (29, -1), (30, 0)]), - (5, [(1, 1), (2, -1), (3, -1), (4, 1), (5, 0), (6, 1), (7, -1), (8, -1), (9, 1), (10, 0), (11, 1), (12, -1), (13, -1), (14, 1), (15, 0), (16, 1), (17, -1), (18, -1), (19, 1), (20, 0), (21, 1), (22, -1), (23, -1), (24, 1), (25, 0), (26, 1), (27, -1), (28, -1), (29, 1), (30, 0)]), - (7, [(1, 1), (2, 1), (3, -1), (4, 1), (5, -1), (6, -1), (7, 0), (8, 1), (9, 1), (10, -1), (11, 1), (12, -1), (13, -1), (14, 0), (15, 1), (16, 1), (17, -1), (18, 1), (19, -1), (20, -1), (21, 0), (22, 1), (23, 1), (24, -1), (25, 1), (26, -1), (27, -1), (28, 0), (29, 1), (30, 1)]), - (9, [(1, 1), (2, 1), (3, 0), (4, 1), (5, 1), (6, 0), (7, 1), (8, 1), (9, 0), (10, 1), (11, 1), (12, 0), (13, 1), (14, 1), (15, 0), (16, 1), (17, 1), (18, 0), (19, 1), (20, 1), (21, 0), (22, 1), (23, 1), (24, 0), (25, 1), (26, 1), (27, 0), (28, 1), (29, 1), (30, 0)]), - (11, [(1, 1), (2, -1), (3, 1), (4, 1), (5, 1), (6, -1), (7, -1), (8, -1), (9, 1), (10, -1), (11, 0), (12, 1), (13, -1), (14, 1), (15, 1), (16, 1), (17, -1), (18, -1), (19, -1), (20, 1), (21, -1), (22, 0), (23, 1), (24, -1), (25, 1), (26, 1), (27, 1), (28, -1), (29, -1), (30, -1)]), - (13, [(1, 1), (2, -1), (3, 1), (4, 1), (5, -1), (6, -1), (7, -1), (8, -1), (9, 1), (10, 1), (11, -1), (12, 1), (13, 0), (14, 1), (15, -1), (16, 1), (17, 1), (18, -1), (19, -1), (20, -1), (21, -1), (22, 1), (23, 1), (24, -1), (25, 1), (26, 0), (27, 1), (28, -1), (29, 1), (30, 1)]), - (15, [(1, 1), (2, 1), (3, 0), (4, 1), 
(5, 0), (6, 0), (7, -1), (8, 1), (9, 0), (10, 0), (11, -1), (12, 0), (13, -1), (14, -1), (15, 0), (16, 1), (17, 1), (18, 0), (19, 1), (20, 0), (21, 0), (22, -1), (23, 1), (24, 0), (25, 0), (26, -1), (27, 0), (28, -1), (29, -1), (30, 0)]), - (17, [(1, 1), (2, 1), (3, -1), (4, 1), (5, -1), (6, -1), (7, -1), (8, 1), (9, 1), (10, -1), (11, -1), (12, -1), (13, 1), (14, -1), (15, 1), (16, 1), (17, 0), (18, 1), (19, 1), (20, -1), (21, 1), (22, -1), (23, -1), (24, -1), (25, 1), (26, 1), (27, -1), (28, -1), (29, -1), (30, 1)]), - (19, [(1, 1), (2, -1), (3, -1), (4, 1), (5, 1), (6, 1), (7, 1), (8, -1), (9, 1), (10, -1), (11, 1), (12, -1), (13, -1), (14, -1), (15, -1), (16, 1), (17, 1), (18, -1), (19, 0), (20, 1), (21, -1), (22, -1), (23, 1), (24, 1), (25, 1), (26, 1), (27, -1), (28, 1), (29, -1), (30, 1)]), - (21, [(1, 1), (2, -1), (3, 0), (4, 1), (5, 1), (6, 0), (7, 0), (8, -1), (9, 0), (10, -1), (11, -1), (12, 0), (13, -1), (14, 0), (15, 0), (16, 1), (17, 1), (18, 0), (19, -1), (20, 1), (21, 0), (22, 1), (23, -1), (24, 0), (25, 1), (26, 1), (27, 0), (28, 0), (29, -1), (30, 0)]), - (23, [(1, 1), (2, 1), (3, 1), (4, 1), (5, -1), (6, 1), (7, -1), (8, 1), (9, 1), (10, -1), (11, -1), (12, 1), (13, 1), (14, -1), (15, -1), (16, 1), (17, -1), (18, 1), (19, -1), (20, -1), (21, -1), (22, -1), (23, 0), (24, 1), (25, 1), (26, 1), (27, 1), (28, -1), (29, 1), (30, -1)]), - (25, [(1, 1), (2, 1), (3, 1), (4, 1), (5, 0), (6, 1), (7, 1), (8, 1), (9, 1), (10, 0), (11, 1), (12, 1), (13, 1), (14, 1), (15, 0), (16, 1), (17, 1), (18, 1), (19, 1), (20, 0), (21, 1), (22, 1), (23, 1), (24, 1), (25, 0), (26, 1), (27, 1), (28, 1), (29, 1), (30, 0)]), - (27, [(1, 1), (2, -1), (3, 0), (4, 1), (5, -1), (6, 0), (7, 1), (8, -1), (9, 0), (10, 1), (11, -1), (12, 0), (13, 1), (14, -1), (15, 0), (16, 1), (17, -1), (18, 0), (19, 1), (20, -1), (21, 0), (22, 1), (23, -1), (24, 0), (25, 1), (26, -1), (27, 0), (28, 1), (29, -1), (30, 0)]), - (29, [(1, 1), (2, -1), (3, -1), (4, 1), (5, 1), (6, 1), (7, 1), (8, -1), (9, 1), (10, -1), (11, -1), (12, -1), (13, 1), (14, -1), (15, -1), (16, 1), (17, -1), (18, -1), (19, -1), (20, 1), (21, -1), (22, 1), (23, 1), (24, 1), (25, 1), (26, -1), (27, -1), (28, 1), (29, 0), (30, 1)]), - ] - - js = self.Integer.jacobi_symbol - - for n, kj in tv: - for k, j in kj: - self.assertEqual(js(k, n), j) - - def test_hex(self): - v1, = self.Integers(0x10) - self.assertEqual(hex(v1), "0x10") - - -class TestIntegerInt(TestIntegerBase): - - def setUp(self): - self.Integer = IntegerNative - - -class testIntegerRandom(unittest.TestCase): - - def test_random_exact_bits(self): - - for _ in range(1000): - a = IntegerNative.random(exact_bits=8) - self.assertFalse(a < 128) - self.assertFalse(a >= 256) - - for bits_value in range(1024, 1024 + 8): - a = IntegerNative.random(exact_bits=bits_value) - self.assertFalse(a < 2**(bits_value - 1)) - self.assertFalse(a >= 2**bits_value) - - def test_random_max_bits(self): - - flag = False - for _ in range(1000): - a = IntegerNative.random(max_bits=8) - flag = flag or a < 128 - self.assertFalse(a>=256) - self.assertTrue(flag) - - for bits_value in range(1024, 1024 + 8): - a = IntegerNative.random(max_bits=bits_value) - self.assertFalse(a >= 2**bits_value) - - def test_random_bits_custom_rng(self): - - class CustomRNG(object): - def __init__(self): - self.counter = 0 - - def __call__(self, size): - self.counter += size - return bchr(0) * size - - custom_rng = CustomRNG() - a = IntegerNative.random(exact_bits=32, randfunc=custom_rng) - self.assertEqual(custom_rng.counter, 4) - - def 
test_random_range(self): - - func = IntegerNative.random_range - - for x in range(200): - a = func(min_inclusive=1, max_inclusive=15) - self.assertTrue(1 <= a <= 15) - - for x in range(200): - a = func(min_inclusive=1, max_exclusive=15) - self.assertTrue(1 <= a < 15) - - self.assertRaises(ValueError, func, min_inclusive=1, max_inclusive=2, - max_exclusive=3) - self.assertRaises(ValueError, func, max_inclusive=2, max_exclusive=3) - -def get_tests(config={}): - tests = [] - tests += list_test_cases(TestIntegerInt) - - try: - from Crypto.Math._IntegerGMP import IntegerGMP - - class TestIntegerGMP(TestIntegerBase): - def setUp(self): - self.Integer = IntegerGMP - - tests += list_test_cases(TestIntegerGMP) - except (ImportError, OSError) as e: - if sys.platform == "win32": - sys.stdout.write("Skipping GMP tests on Windows\n") - else: - sys.stdout.write("Skipping GMP tests (%s)\n" % str(e) ) - - try: - from Crypto.Math._IntegerCustom import IntegerCustom - - class TestIntegerCustomModexp(TestIntegerBase): - def setUp(self): - self.Integer = IntegerCustom - - tests += list_test_cases(TestIntegerCustomModexp) - except (ImportError, OSError) as e: - sys.stdout.write("Skipping custom modexp tests (%s)\n" % str(e) ) - - tests += list_test_cases(testIntegerRandom) - return tests - -if __name__ == '__main__': - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_testing.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_testing.py deleted file mode 100644 index c8191b3866f7104d2d02d32da9826c68ca17ac95..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/_core/_testing.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import annotations - -from typing import Any, Awaitable, Generator - -from ._compat import DeprecatedAwaitableList, _warn_deprecation -from ._eventloop import get_asynclib - - -class TaskInfo: - """ - Represents an asynchronous task. - - :ivar int id: the unique identifier of the task - :ivar parent_id: the identifier of the parent task, if any - :vartype parent_id: Optional[int] - :ivar str name: the description of the task (if any) - :ivar ~collections.abc.Coroutine coro: the coroutine object of the task - """ - - __slots__ = "_name", "id", "parent_id", "name", "coro" - - def __init__( - self, - id: int, - parent_id: int | None, - name: str | None, - coro: Generator[Any, Any, Any] | Awaitable[Any], - ): - func = get_current_task - self._name = f"{func.__module__}.{func.__qualname__}" - self.id: int = id - self.parent_id: int | None = parent_id - self.name: str | None = name - self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro - - def __eq__(self, other: object) -> bool: - if isinstance(other, TaskInfo): - return self.id == other.id - - return NotImplemented - - def __hash__(self) -> int: - return hash(self.id) - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})" - - def __await__(self) -> Generator[None, None, TaskInfo]: - _warn_deprecation(self) - if False: - yield - - return self - - def _unwrap(self) -> TaskInfo: - return self - - -def get_current_task() -> TaskInfo: - """ - Return the current task. 
- - :return: a representation of the current task - - """ - return get_asynclib().get_current_task() - - -def get_running_tasks() -> DeprecatedAwaitableList[TaskInfo]: - """ - Return a list of running tasks in the current event loop. - - :return: a list of task info objects - - """ - tasks = get_asynclib().get_running_tasks() - return DeprecatedAwaitableList(tasks, func=get_running_tasks) - - -async def wait_all_tasks_blocked() -> None: - """Wait until all other tasks are waiting for something.""" - await get_asynclib().wait_all_tasks_blocked() diff --git a/spaces/johnsamuel/stabilityai-stable-diffusion-2-1/app.py b/spaces/johnsamuel/stabilityai-stable-diffusion-2-1/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/johnsamuel/stabilityai-stable-diffusion-2-1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/jonathang/YoutubeSmartSpeed/app.py b/spaces/jonathang/YoutubeSmartSpeed/app.py deleted file mode 100644 index 2cfc04f59134d6b60979269a19003b9ffbf5e2eb..0000000000000000000000000000000000000000 --- a/spaces/jonathang/YoutubeSmartSpeed/app.py +++ /dev/null @@ -1,63 +0,0 @@ -import moviepy.editor as mp -import librosa -import numpy as np -import gradio as gr -import subprocess - - -def buffer_n_merge(intervals, buffer=0.1): - if not intervals: return [] - - new_intervals = [intervals[0]] - new_intervals[0][0] -= buffer - new_intervals[0][1] += buffer - - for start, end in intervals[1:]: - start -= buffer - end += buffer - if new_intervals[-1][-1] >= start: - new_intervals[-1][-1] = end - else: - new_intervals.append([start, end]) - return new_intervals - - -def download_and_process_video(in_f, threshold_db, buffer_sec): - vidpath = in_f.name - - # load the video - video = mp.VideoFileClip(vidpath) - # extract audio and convert to mono - audio = video.audio.to_soundarray(fps=22000) - - # use librosa to get non-silent intervals - non_silent_intervals = librosa.effects.split(audio[:, 0], top_db=threshold_db) - # convert non_silent_intervals from samples to seconds, as librosa works with samples not seconds - non_silent_intervals_sec = np.array(non_silent_intervals) / 22000 - - # Add buffer and merge intervals - non_silent_intervals_sec = buffer_n_merge(non_silent_intervals_sec.tolist(), buffer=buffer_sec) - - # Process video - # cut the video using the non-silent intervals and store the clips in a list - clips = [video.subclip(max(0, start_time), min(end_time, video.duration)) for start_time, end_time in non_silent_intervals_sec] - - output_file = 'my_concatenation.mp4' - final_clip = mp.concatenate_videoclips(clips) - final_clip.write_videofile(output_file, codec='libx264', audio_codec='aac', temp_audiofile='temp-audio.m4a', remove_temp=True) - - return output_file - - -iface = gr.Interface( - fn=download_and_process_video, - inputs=[ - gr.inputs.File(label="Video File (.mp4 only)", file_count='single', type='file'), - gr.inputs.Slider(minimum=1, maximum=70, step=1, default=30, label="Threshold (db)"), - gr.inputs.Slider(minimum=0, maximum=2, step=0.01, default=0.1, label="Buffer (sec)"), - ], - outputs=gr.outputs.Video(label="Processed Video"), - title="Video Silence Remover" -) - -iface.launch() diff --git a/spaces/jordonpeter01/MusicGen/tests/__init__.py b/spaces/jordonpeter01/MusicGen/tests/__init__.py deleted file mode 100644 index 
0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/jt5d/docker-test1/README.md b/spaces/jt5d/docker-test1/README.md deleted file mode 100644 index c9af83bc55a92ebe1fdb0334d718574805eef3f3..0000000000000000000000000000000000000000 --- a/spaces/jt5d/docker-test1/README.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Shiny for Python template -emoji: 🌍 -colorFrom: yellow -colorTo: indigo -sdk: docker -pinned: false -license: mit -duplicated_from: posit/shiny-for-python-template ---- - -This is a templated Space for [Shiny for Python](https://shiny.rstudio.com/py/). - - -To get started with a new app do the following: - -1) Install Shiny with `pip install shiny` -2) Create a new app with `shiny create .` -3) Then run the app with `shiny run --reload` - -To learn more about this framework please see the [Documentation](https://shiny.rstudio.com/py/docs/overview.html). diff --git a/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_datapipeline.py b/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_datapipeline.py deleted file mode 100644 index aeb389050b88f3b381a372d87ee72f5ca408f4b8..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_datapipeline.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import pytest -import sqlite3 -from pathlib import Path - -from youtube_transcriber.datapipeline import DataPipeline -from youtube_transcriber.datapipeline import create_hardcoded_data_pipeline -from youtube_transcriber.preprocessing.youtubevideopreprocessor import YoutubeVideoPreprocessor -from youtube_transcriber.loading.loaderiterator import LoaderIterator -from youtube_transcriber.loading.serialization import JsonSerializer -from youtube_transcriber.transforming.addtitletransform import AddTitleTransform -from youtube_transcriber.transforming.adddescriptiontransform import AddDescriptionTransform -from youtube_transcriber.transforming.whispertransform import WhisperTransform -from youtube_transcriber.transforming.batchtransformer import BatchTransformer -from youtube_transcriber.storing.sqlitebatchvideostorer import SQLiteBatchVideoStorer -from youtube_transcriber.storing.sqlitecontextmanager import SQLiteContextManager -from youtube_transcriber.storing.createdb import create_db - -@pytest.fixture -def expected_db_output(): - return [ - ("Tquotes", - "https://www.youtube.com/watch?v=NSkoGZ8J1Ag", - "Steve Jobs quotes Bob Dylan", - " Good morning. Good morning and welcome to Apple's 1984 annual shareholders meeting. I'd like to open the meeting with a part of an old poem about a 20-year-old poem by Dylan. That's Bob Dylan. Come writers and critics who prophesize with your pens and keep your eyes wide, the chance won't come again. And don't speak too soon for the wheels still in spin. And there's no telling who that it's naming. For the loser now will be later to win for the times they are a change in. Now."), - ("changminjen", - "https://www.youtube.com/watch?v=Ak516vtDTEA", - "My allegiance is to the Republic, to democracy!", - " I have brought peace, freedom, justice and security to my new empire. Your new empire don't make me kill you. Anakin, my allegiance is to the Republic, to democracy! 
If you're not with me, then you're my enemy. Only a Sith deals an absolute.") - ] - -@pytest.fixture -def data_pipeline(): - loader_iterator = LoaderIterator(JsonSerializer(), 2) - batch_transformer = BatchTransformer([AddTitleTransform(), - AddDescriptionTransform(), - WhisperTransform()]) - video_storer = SQLiteBatchVideoStorer() - sqlite_context_manager = SQLiteContextManager("dummy.db") - return DataPipeline(loader_iterator, - batch_transformer, - video_storer, - sqlite_context_manager) - -def test_datapipeline_init(): - data_pipeline = DataPipeline("loader_iterator", - "transformer", - "storer", - "context") - assert type(data_pipeline) == DataPipeline - assert data_pipeline.loader_iterator == "loader_iterator" - assert data_pipeline.batch_transformer == "transformer" - assert data_pipeline.storer == "storer" - assert data_pipeline.sqlite_context_manager == "context" - -def test_process_files(data_pipeline, expected_db_output): - test_folder = Path.home()/"whisper_gpt_pipeline/youtube_transcriber/test" - files = [Path(test_folder/"files/6.json"), Path(test_folder/"files/7.json")] - try: - create_db("dummy.db") - connection = sqlite3.connect("dummy.db") - cursor = connection.cursor() - - data_pipeline.process(files) - - cursor.execute("SELECT CHANNEL_NAME, URL, TITLE, TRANSCRIPTION FROM VIDEO") - videos = cursor.fetchall() - - for i in range(len(videos)): - assert videos[i][0] == expected_db_output[i][0] - assert videos[i][1] == expected_db_output[i][1] - assert videos[i][2] == expected_db_output[i][2] - assert videos[i][3] == expected_db_output[i][3] - finally: - os.remove("dummy.db") - -def test_process_video_batch(data_pipeline, expected_db_output): - video_data = [ - { - "channel_name": "Tquotes", - "url": "https://www.youtube.com/watch?v=NSkoGZ8J1Ag", - }, - { - "channel_name": "changminjen", - "url": "https://www.youtube.com/watch?v=Ak516vtDTEA", - } - ] - try: - create_db("dummy.db") - connection = sqlite3.connect("dummy.db") - cursor = connection.cursor() - - data_pipeline._process_video_batch(cursor, video_data) - - cursor.execute("SELECT CHANNEL_NAME, URL, TITLE, TRANSCRIPTION FROM VIDEO") - videos = cursor.fetchall() - - for i in range(len(videos)): - assert videos[i][0] == expected_db_output[i][0] - assert videos[i][1] == expected_db_output[i][1] - assert videos[i][2] == expected_db_output[i][2] - assert videos[i][3] == expected_db_output[i][3] - finally: - os.remove("dummy.db") - -def test_hardcoded_data_pipeline_is_instantiated(): - data_pipeline = create_hardcoded_data_pipeline() - assert type(data_pipeline) == DataPipeline \ No newline at end of file diff --git a/spaces/jvde/sovits-webui/monotonic_align/setup.py b/spaces/jvde/sovits-webui/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/jvde/sovits-webui/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/kandysh/clause_segmentation/README.md b/spaces/kandysh/clause_segmentation/README.md deleted file mode 100644 index 62c58e7072ea6847e93fccce40381936b427370c..0000000000000000000000000000000000000000 --- a/spaces/kandysh/clause_segmentation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Clause_segmentation -emoji: 👁 -colorFrom: green -colorTo: indigo -sdk: streamlit -sdk_version: 1.9.0 
-app_file: app.py -pinned: false -license: ms-pl ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/karolmajek/YOLOR/utils/autoanchor.py b/spaces/karolmajek/YOLOR/utils/autoanchor.py deleted file mode 100644 index 1e82492bf09050013cb1bee6fbec6baef5ff22a5..0000000000000000000000000000000000000000 --- a/spaces/karolmajek/YOLOR/utils/autoanchor.py +++ /dev/null @@ -1,152 +0,0 @@ -# Auto-anchor utils - -import numpy as np -import torch -import yaml -from scipy.cluster.vq import kmeans -from tqdm import tqdm - - -def check_anchor_order(m): - # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary - a = m.anchor_grid.prod(-1).view(-1) # anchor area - da = a[-1] - a[0] # delta a - ds = m.stride[-1] - m.stride[0] # delta s - if da.sign() != ds.sign(): # same order - print('Reversing anchor order') - m.anchors[:] = m.anchors.flip(0) - m.anchor_grid[:] = m.anchor_grid.flip(0) - - -def check_anchors(dataset, model, thr=4.0, imgsz=640): - # Check anchor fit to data, recompute if necessary - print('\nAnalyzing anchors... ', end='') - m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() - shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) - scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale - wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh - - def metric(k): # compute metric - r = wh[:, None] / k[None] - x = torch.min(r, 1. / r).min(2)[0] # ratio metric - best = x.max(1)[0] # best_x - aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold - bpr = (best > 1. / thr).float().mean() # best possible recall - return bpr, aat - - bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) - print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='') - if bpr < 0.98: # threshold to recompute - print('. Attempting to improve anchors, please wait...') - na = m.anchor_grid.numel() // 2 # number of anchors - new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - new_bpr = metric(new_anchors.reshape(-1, 2))[0] - if new_bpr > bpr: # replace anchors - new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) - m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference - m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss - check_anchor_order(m) - print('New anchors saved to model. Update model *.yaml to use these anchors in the future.') - else: - print('Original anchors better than new anchors. Proceeding with original anchors.') - print('') # newline - - -def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset - - Arguments: - path: path to dataset *.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results - - Return: - k: kmeans evolved anchors - - Usage: - from utils.general import *; _ = kmean_anchors() - """ - thr = 1. / thr - - def metric(k, wh): # compute metrics - r = wh[:, None] / k[None] - x = torch.min(r, 1. 
/ r).min(2)[0] # ratio metric - # x = wh_iou(wh, torch.tensor(k)) # iou metric - return x, x.max(1)[0] # x, best_x - - def anchor_fitness(k): # mutation fitness - _, best = metric(torch.tensor(k, dtype=torch.float32), wh) - return (best * (best > thr).float()).mean() # fitness - - def print_results(k): - k = k[np.argsort(k.prod(1))] # sort small to large - x, best = metric(k, wh0) - bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat)) - print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' % - (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='') - for i, x in enumerate(k): - print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg - return k - - if isinstance(path, str): # *.yaml file - with open(path) as f: - data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict - from utils.datasets import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - else: - dataset = path # dataset - - # Get label wh - shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) - wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh - - # Filter - i = (wh0 < 3.0).any(1).sum() - if i: - print('WARNING: Extremely small objects found. ' - '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0))) - wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels - - # Kmeans calculation - print('Running kmeans for %g anchors on %g points...' % (n, len(wh))) - s = wh.std(0) # sigmas for whitening - k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - k *= s - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered - k = print_results(k) - - # Plot - # k, d = [None] * 20, [None] * 20 - # for i in tqdm(range(1, 21)): - # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) - # ax = ax.ravel() - # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh - # ax[0].hist(wh[wh[:, 0]<100, 0],400) - # ax[1].hist(wh[wh[:, 1]<100, 1],400) - # fig.tight_layout() - # fig.savefig('wh.png', dpi=200) - - # Evolve - npr = np.random - f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) - kg = (k.copy() * v).clip(min=2.0) - fg = anchor_fitness(kg) - if fg > f: - f, k = fg, kg.copy() - pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f - if verbose: - print_results(k) - - return print_results(k) diff --git a/spaces/kazuk/youtube-whisper/README.md b/spaces/kazuk/youtube-whisper/README.md deleted file mode 100644 index c54490478062aadc443462183ffe417825024c3e..0000000000000000000000000000000000000000 --- a/spaces/kazuk/youtube-whisper/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Youtube Whisper -emoji: ⚡ -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at 
https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kdb8756/Pip_Counter/README.md b/spaces/kdb8756/Pip_Counter/README.md deleted file mode 100644 index 4550338036a0222a4243fc09a1af567f0f8074bf..0000000000000000000000000000000000000000 --- a/spaces/kdb8756/Pip_Counter/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dominopipcounter -emoji: 💩 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer/hparams.py b/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer/hparams.py deleted file mode 100644 index f7d38f0aa4c34d11349e40dbb9861b1aec2dcb8b..0000000000000000000000000000000000000000 --- a/spaces/keithhon/Real-Time-Voice-Cloning/synthesizer/hparams.py +++ /dev/null @@ -1,92 +0,0 @@ -import ast -import pprint - -class HParams(object): - def __init__(self, **kwargs): self.__dict__.update(kwargs) - def __setitem__(self, key, value): setattr(self, key, value) - def __getitem__(self, key): return getattr(self, key) - def __repr__(self): return pprint.pformat(self.__dict__) - - def parse(self, string): - # Overrides hparams from a comma-separated string of name=value pairs - if len(string) > 0: - overrides = [s.split("=") for s in string.split(",")] - keys, values = zip(*overrides) - keys = list(map(str.strip, keys)) - values = list(map(str.strip, values)) - for k in keys: - self.__dict__[k] = ast.literal_eval(values[keys.index(k)]) - return self - -hparams = HParams( - ### Signal Processing (used in both synthesizer and vocoder) - sample_rate = 16000, - n_fft = 800, - num_mels = 80, - hop_size = 200, # Tacotron uses 12.5 ms frame shift (set to sample_rate * 0.0125) - win_size = 800, # Tacotron uses 50 ms frame length (set to sample_rate * 0.050) - fmin = 55, - min_level_db = -100, - ref_level_db = 20, - max_abs_value = 4., # Gradient explodes if too big, premature convergence if too small. - preemphasis = 0.97, # Filter coefficient to use if preemphasize is True - preemphasize = True, - - ### Tacotron Text-to-Speech (TTS) - tts_embed_dims = 512, # Embedding dimension for the graphemes/phoneme inputs - tts_encoder_dims = 256, - tts_decoder_dims = 128, - tts_postnet_dims = 512, - tts_encoder_K = 5, - tts_lstm_dims = 1024, - tts_postnet_K = 5, - tts_num_highways = 4, - tts_dropout = 0.5, - tts_cleaner_names = ["english_cleaners"], - tts_stop_threshold = -3.4, # Value below which audio generation ends. 
- # For example, for a range of [-4, 4], this - # will terminate the sequence at the first - # frame that has all values < -3.4 - - ### Tacotron Training - tts_schedule = [(2, 1e-3, 20_000, 12), # Progressive training schedule - (2, 5e-4, 40_000, 12), # (r, lr, step, batch_size) - (2, 2e-4, 80_000, 12), # - (2, 1e-4, 160_000, 12), # r = reduction factor (# of mel frames - (2, 3e-5, 320_000, 12), # synthesized for each decoder iteration) - (2, 1e-5, 640_000, 12)], # lr = learning rate - - tts_clip_grad_norm = 1.0, # clips the gradient norm to prevent explosion - set to None if not needed - tts_eval_interval = 500, # Number of steps between model evaluation (sample generation) - # Set to -1 to generate after completing epoch, or 0 to disable - - tts_eval_num_samples = 1, # Makes this number of samples - - ### Data Preprocessing - max_mel_frames = 900, - rescale = True, - rescaling_max = 0.9, - synthesis_batch_size = 16, # For vocoder preprocessing and inference. - - ### Mel Visualization and Griffin-Lim - signal_normalization = True, - power = 1.5, - griffin_lim_iters = 60, - - ### Audio processing options - fmax = 7600, # Should not exceed (sample_rate // 2) - allow_clipping_in_normalization = True, # Used when signal_normalization = True - clip_mels_length = True, # If true, discards samples exceeding max_mel_frames - use_lws = False, # "Fast spectrogram phase recovery using local weighted sums" - symmetric_mels = True, # Sets mel range to [-max_abs_value, max_abs_value] if True, - # and [0, max_abs_value] if False - trim_silence = True, # Use with sample_rate of 16000 for best results - - ### SV2TTS - speaker_embedding_size = 256, # Dimension for the speaker embedding - silence_min_duration_split = 0.4, # Duration in seconds of a silence for an utterance to be split - utterance_min_duration = 1.6, # Duration in seconds below which utterances are discarded - ) - -def hparams_debug_string(): - return str(hparams) diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/speaker_verification_dataset.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/speaker_verification_dataset.py deleted file mode 100644 index cecd8ed8ac100b80d5087fa47f22f92c84fea032..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/speaker_verification_dataset.py +++ /dev/null @@ -1,56 +0,0 @@ -from speaker_encoder.data_objects.random_cycler import RandomCycler -from speaker_encoder.data_objects.speaker_batch import SpeakerBatch -from speaker_encoder.data_objects.speaker import Speaker -from speaker_encoder.params_data import partials_n_frames -from torch.utils.data import Dataset, DataLoader -from pathlib import Path - -# TODO: improve with a pool of speakers for data efficiency - -class SpeakerVerificationDataset(Dataset): - def __init__(self, datasets_root: Path): - self.root = datasets_root - speaker_dirs = [f for f in self.root.glob("*") if f.is_dir()] - if len(speaker_dirs) == 0: - raise Exception("No speakers found. 
Make sure you are pointing to the directory " - "containing all preprocessed speaker directories.") - self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs] - self.speaker_cycler = RandomCycler(self.speakers) - - def __len__(self): - return int(1e10) - - def __getitem__(self, index): - return next(self.speaker_cycler) - - def get_logs(self): - log_string = "" - for log_fpath in self.root.glob("*.txt"): - with log_fpath.open("r") as log_file: - log_string += "".join(log_file.readlines()) - return log_string - - -class SpeakerVerificationDataLoader(DataLoader): - def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None, - batch_sampler=None, num_workers=0, pin_memory=False, timeout=0, - worker_init_fn=None): - self.utterances_per_speaker = utterances_per_speaker - - super().__init__( - dataset=dataset, - batch_size=speakers_per_batch, - shuffle=False, - sampler=sampler, - batch_sampler=batch_sampler, - num_workers=num_workers, - collate_fn=self.collate, - pin_memory=pin_memory, - drop_last=False, - timeout=timeout, - worker_init_fn=worker_init_fn - ) - - def collate(self, speakers): - return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames) - \ No newline at end of file diff --git a/spaces/kevinwang676/VoiceChanger/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py b/spaces/kevinwang676/VoiceChanger/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py deleted file mode 100644 index 5f78337a3d1f9eb6e9145eb5093618796c6842d2..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r34" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/web/config/__init__.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/web/config/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/koajoel/PolyFormer/fairseq/CONTRIBUTING.md b/spaces/koajoel/PolyFormer/fairseq/CONTRIBUTING.md deleted file mode 100644 index 3930c46196b7b6082cacc76fd5808b49677ae805..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/CONTRIBUTING.md +++ /dev/null @@ -1,28 +0,0 @@ -# Contributing to Facebook AI Research Sequence-to-Sequence Toolkit (fairseq) -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests -We actively welcome your pull requests. - -1. Fork the repo and create your branch from `main`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). 
- -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Facebook's open source projects. - -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and has sufficient instructions to be able to reproduce the issue. - -## License -By contributing to Facebook AI Research Sequence-to-Sequence Toolkit (fairseq), -you agree that your contributions will be licensed under the LICENSE file in -the root directory of this source tree. diff --git a/spaces/kquote03/lama-video-watermark-remover/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py b/spaces/kquote03/lama-video-watermark-remover/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py deleted file mode 100644 index 45bb3c8cfd36d8f668e6fde756b17587eab72082..0000000000000000000000000000000000000000 --- a/spaces/kquote03/lama-video-watermark-remover/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- -# File : test_sync_batchnorm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. - -import unittest - -import torch -import torch.nn as nn -from torch.autograd import Variable - -from sync_batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, DataParallelWithCallback -from sync_batchnorm.unittest import TorchTestCase - - -def handy_var(a, unbias=True): - n = a.size(0) - asum = a.sum(dim=0) - as_sum = (a ** 2).sum(dim=0) # a square sum - sumvar = as_sum - asum * asum / n - if unbias: - return sumvar / (n - 1) - else: - return sumvar / n - - -def _find_bn(module): - for m in module.modules(): - if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)): - return m - - -class SyncTestCase(TorchTestCase): - def _syncParameters(self, bn1, bn2): - bn1.reset_parameters() - bn2.reset_parameters() - if bn1.affine and bn2.affine: - bn2.weight.data.copy_(bn1.weight.data) - bn2.bias.data.copy_(bn1.bias.data) - - def _checkBatchNormResult(self, bn1, bn2, input, is_train, cuda=False): - """Check the forward and backward for the customized batch normalization.""" - bn1.train(mode=is_train) - bn2.train(mode=is_train) - - if cuda: - input = input.cuda() - - self._syncParameters(_find_bn(bn1), _find_bn(bn2)) - - input1 = Variable(input, requires_grad=True) - output1 = bn1(input1) - output1.sum().backward() - input2 = Variable(input, requires_grad=True) - output2 = bn2(input2) - output2.sum().backward() - - self.assertTensorClose(input1.data, input2.data) - self.assertTensorClose(output1.data, output2.data) - self.assertTensorClose(input1.grad, input2.grad) - self.assertTensorClose(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean) - self.assertTensorClose(_find_bn(bn1).running_var, _find_bn(bn2).running_var) - - def testSyncBatchNormNormalTrain(self): - bn = nn.BatchNorm1d(10) - sync_bn = SynchronizedBatchNorm1d(10) - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True) - - def testSyncBatchNormNormalEval(self): - bn = nn.BatchNorm1d(10) - sync_bn = SynchronizedBatchNorm1d(10) - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False) - - def testSyncBatchNormSyncTrain(self): - bn = nn.BatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = DataParallelWithCallback(sync_bn, 
device_ids=[0, 1]) - - bn.cuda() - sync_bn.cuda() - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True, cuda=True) - - def testSyncBatchNormSyncEval(self): - bn = nn.BatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - - bn.cuda() - sync_bn.cuda() - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False, cuda=True) - - def testSyncBatchNorm2DSyncTrain(self): - bn = nn.BatchNorm2d(10) - sync_bn = SynchronizedBatchNorm2d(10) - sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - - bn.cuda() - sync_bn.cuda() - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/exceptiongroup/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/exceptiongroup/__init__.py deleted file mode 100644 index 0e7e02bcf3bc0eb65f8001ca5f530b53d293c31c..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/exceptiongroup/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -__all__ = [ - "BaseExceptionGroup", - "ExceptionGroup", - "catch", - "format_exception", - "format_exception_only", - "print_exception", - "print_exc", -] - -import os -import sys - -from ._catch import catch -from ._version import version as __version__ # noqa: F401 - -if sys.version_info < (3, 11): - from ._exceptions import BaseExceptionGroup, ExceptionGroup - from ._formatting import ( - format_exception, - format_exception_only, - print_exc, - print_exception, - ) - - if os.getenv("EXCEPTIONGROUP_NO_PATCH") != "1": - from . 
import _formatting # noqa: F401 - - BaseExceptionGroup.__module__ = __name__ - ExceptionGroup.__module__ = __name__ -else: - from traceback import ( - format_exception, - format_exception_only, - print_exc, - print_exception, - ) - - BaseExceptionGroup = BaseExceptionGroup - ExceptionGroup = ExceptionGroup diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/exceptiongroup/_catch.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/exceptiongroup/_catch.py deleted file mode 100644 index aa16d16b4c5a8e5f22e5a40c4c97257b1ad472e4..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/exceptiongroup/_catch.py +++ /dev/null @@ -1,114 +0,0 @@ -from __future__ import annotations - -import sys -from collections.abc import Callable, Iterable, Mapping -from contextlib import AbstractContextManager -from types import TracebackType -from typing import TYPE_CHECKING, Any - -if sys.version_info < (3, 11): - from ._exceptions import BaseExceptionGroup - -if TYPE_CHECKING: - _Handler = Callable[[BaseException], Any] - - -class _Catcher: - def __init__(self, handler_map: Mapping[tuple[type[BaseException], ...], _Handler]): - self._handler_map = handler_map - - def __enter__(self) -> None: - pass - - def __exit__( - self, - etype: type[BaseException] | None, - exc: BaseException | None, - tb: TracebackType | None, - ) -> bool: - if exc is not None: - unhandled = self.handle_exception(exc) - if unhandled is exc: - return False - elif unhandled is None: - return True - else: - raise unhandled from None - - return False - - def handle_exception(self, exc: BaseException) -> BaseExceptionGroup | None: - excgroup: BaseExceptionGroup | None - if isinstance(exc, BaseExceptionGroup): - excgroup = exc - else: - excgroup = BaseExceptionGroup("", [exc]) - - new_exceptions: list[BaseException] = [] - for exc_types, handler in self._handler_map.items(): - matched, excgroup = excgroup.split(exc_types) - if matched: - try: - handler(matched) - except BaseException as new_exc: - new_exceptions.append(new_exc) - - if not excgroup: - break - - if new_exceptions: - if excgroup: - new_exceptions.append(excgroup) - - return BaseExceptionGroup("", new_exceptions) - elif ( - excgroup and len(excgroup.exceptions) == 1 and excgroup.exceptions[0] is exc - ): - return exc - else: - return excgroup - - -def catch( - __handlers: Mapping[type[BaseException] | Iterable[type[BaseException]], _Handler] -) -> AbstractContextManager[None]: - if not isinstance(__handlers, Mapping): - raise TypeError("the argument must be a mapping") - - handler_map: dict[ - tuple[type[BaseException], ...], Callable[[BaseExceptionGroup]] - ] = {} - for type_or_iterable, handler in __handlers.items(): - iterable: tuple[type[BaseException]] - if isinstance(type_or_iterable, type) and issubclass( - type_or_iterable, BaseException - ): - iterable = (type_or_iterable,) - elif isinstance(type_or_iterable, Iterable): - iterable = tuple(type_or_iterable) - else: - raise TypeError( - "each key must be either an exception classes or an iterable thereof" - ) - - if not callable(handler): - raise TypeError("handlers must be callable") - - for exc_type in iterable: - if not isinstance(exc_type, type) or not issubclass( - exc_type, BaseException - ): - raise TypeError( - "each key must be either an exception classes or an iterable " - "thereof" - ) - - if issubclass(exc_type, BaseExceptionGroup): - raise TypeError( - "catching ExceptionGroup with catch() is not 
allowed. " - "Use except instead." - ) - - handler_map[iterable] = handler - - return _Catcher(handler_map) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-36e305d6.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-36e305d6.js deleted file mode 100644 index 6411c424621fcf158ab371b77f194cc82cb36bba..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-36e305d6.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as o,i as s,s as a}from"./index-8c3da1d9.js";class n extends o{constructor(e){super(),s(this,e,null,null,a,{})}}const c=n,p=["static"],i=t=>({type:{payload:"Any"},description:{payload:"stored state value"},example_data:""});export{c as Component,i as document,p as modes}; -//# sourceMappingURL=index-36e305d6.js.map diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-9da94804.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-9da94804.css deleted file mode 100644 index 79d901421a55ea578fdaf2c50c84e8fafcea8c41..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-9da94804.css +++ /dev/null @@ -1 +0,0 @@ -div.svelte-1gww5xe{display:flex;position:absolute;justify-content:center;align-items:center;border-radius:var(--radius-sm);background-color:#000c;padding:var(--size-1) .4rem;color:#fff;font-size:var(--text-sm)}span.svelte-1gww5xe{display:inline-block;margin-right:var(--size-1);border-radius:var(--radius-xs);width:var(--size-3);height:var(--size-3)}.wrap.svelte-1mjxput{margin-top:var(--size-3)}.legend.svelte-1mjxput{display:flex;justify-content:center;align-items:center;color:var(--body-text-color)}.legend-item.svelte-1mjxput{display:flex;align-items:center;gap:var(--spacing-sm);margin-right:var(--size-2);margin-left:var(--size-2)}.legend-box.svelte-1mjxput{display:inline-block;border-radius:var(--radius-xs);width:var(--size-3);height:var(--size-3)}svg.svelte-1mjxput{width:var(--size-full)}.label-text.svelte-1mjxput{fill:var(--body-text-color);font-size:var(--text-sm);font-family:var(--font-mono)}.main-label.svelte-1mjxput{display:flex;justify-content:center;align-items:center;color:var(--body-text-color)}.chart.svelte-etmurc{display:flex;display:relative;justify-content:center;align-items:center;background:var(--background-fill-primary);width:var(--size-full);height:var(--size-64)} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpx/_config.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpx/_config.py deleted file mode 100644 index f46a5bfe6ba6093688c7a91bd51de9d137840432..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpx/_config.py +++ /dev/null @@ -1,369 +0,0 @@ -import logging -import os -import ssl -import sys -import typing -from pathlib import Path - -import certifi - -from ._compat import set_minimum_tls_version_1_2 -from ._models import Headers -from ._types import CertTypes, HeaderTypes, TimeoutTypes, URLTypes, VerifyTypes -from ._urls import URL -from ._utils import get_ca_bundle_from_env - -DEFAULT_CIPHERS = ":".join( - [ - "ECDHE+AESGCM", - "ECDHE+CHACHA20", - "DHE+AESGCM", - 
"DHE+CHACHA20", - "ECDH+AESGCM", - "DH+AESGCM", - "ECDH+AES", - "DH+AES", - "RSA+AESGCM", - "RSA+AES", - "!aNULL", - "!eNULL", - "!MD5", - "!DSS", - ] -) - - -logger = logging.getLogger("httpx") - - -class UnsetType: - pass # pragma: no cover - - -UNSET = UnsetType() - - -def create_ssl_context( - cert: typing.Optional[CertTypes] = None, - verify: VerifyTypes = True, - trust_env: bool = True, - http2: bool = False, -) -> ssl.SSLContext: - return SSLConfig( - cert=cert, verify=verify, trust_env=trust_env, http2=http2 - ).ssl_context - - -class SSLConfig: - """ - SSL Configuration. - """ - - DEFAULT_CA_BUNDLE_PATH = Path(certifi.where()) - - def __init__( - self, - *, - cert: typing.Optional[CertTypes] = None, - verify: VerifyTypes = True, - trust_env: bool = True, - http2: bool = False, - ): - self.cert = cert - self.verify = verify - self.trust_env = trust_env - self.http2 = http2 - self.ssl_context = self.load_ssl_context() - - def load_ssl_context(self) -> ssl.SSLContext: - logger.debug( - "load_ssl_context verify=%r cert=%r trust_env=%r http2=%r", - self.verify, - self.cert, - self.trust_env, - self.http2, - ) - - if self.verify: - return self.load_ssl_context_verify() - return self.load_ssl_context_no_verify() - - def load_ssl_context_no_verify(self) -> ssl.SSLContext: - """ - Return an SSL context for unverified connections. - """ - context = self._create_default_ssl_context() - context.check_hostname = False - context.verify_mode = ssl.CERT_NONE - self._load_client_certs(context) - return context - - def load_ssl_context_verify(self) -> ssl.SSLContext: - """ - Return an SSL context for verified connections. - """ - if self.trust_env and self.verify is True: - ca_bundle = get_ca_bundle_from_env() - if ca_bundle is not None: - self.verify = ca_bundle - - if isinstance(self.verify, ssl.SSLContext): - # Allow passing in our own SSLContext object that's pre-configured. - context = self.verify - self._load_client_certs(context) - return context - elif isinstance(self.verify, bool): - ca_bundle_path = self.DEFAULT_CA_BUNDLE_PATH - elif Path(self.verify).exists(): - ca_bundle_path = Path(self.verify) - else: - raise IOError( - "Could not find a suitable TLS CA certificate bundle, " - "invalid path: {}".format(self.verify) - ) - - context = self._create_default_ssl_context() - context.verify_mode = ssl.CERT_REQUIRED - context.check_hostname = True - - # Signal to server support for PHA in TLS 1.3. Raises an - # AttributeError if only read-only access is implemented. - if sys.version_info >= (3, 8): # pragma: no cover - try: - context.post_handshake_auth = True - except AttributeError: # pragma: no cover - pass - - # Disable using 'commonName' for SSLContext.check_hostname - # when the 'subjectAltName' extension isn't available. - try: - context.hostname_checks_common_name = False - except AttributeError: # pragma: no cover - pass - - if ca_bundle_path.is_file(): - cafile = str(ca_bundle_path) - logger.debug("load_verify_locations cafile=%r", cafile) - context.load_verify_locations(cafile=cafile) - elif ca_bundle_path.is_dir(): - capath = str(ca_bundle_path) - logger.debug("load_verify_locations capath=%r", capath) - context.load_verify_locations(capath=capath) - - self._load_client_certs(context) - - return context - - def _create_default_ssl_context(self) -> ssl.SSLContext: - """ - Creates the default SSLContext object that's used for both verified - and unverified connections. 
- """ - context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) - set_minimum_tls_version_1_2(context) - context.options |= ssl.OP_NO_COMPRESSION - context.set_ciphers(DEFAULT_CIPHERS) - - if ssl.HAS_ALPN: - alpn_idents = ["http/1.1", "h2"] if self.http2 else ["http/1.1"] - context.set_alpn_protocols(alpn_idents) - - if sys.version_info >= (3, 8): # pragma: no cover - keylogfile = os.environ.get("SSLKEYLOGFILE") - if keylogfile and self.trust_env: - context.keylog_filename = keylogfile - - return context - - def _load_client_certs(self, ssl_context: ssl.SSLContext) -> None: - """ - Loads client certificates into our SSLContext object - """ - if self.cert is not None: - if isinstance(self.cert, str): - ssl_context.load_cert_chain(certfile=self.cert) - elif isinstance(self.cert, tuple) and len(self.cert) == 2: - ssl_context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1]) - elif isinstance(self.cert, tuple) and len(self.cert) == 3: - ssl_context.load_cert_chain( - certfile=self.cert[0], - keyfile=self.cert[1], - password=self.cert[2], # type: ignore - ) - - -class Timeout: - """ - Timeout configuration. - - **Usage**: - - Timeout(None) # No timeouts. - Timeout(5.0) # 5s timeout on all operations. - Timeout(None, connect=5.0) # 5s timeout on connect, no other timeouts. - Timeout(5.0, connect=10.0) # 10s timeout on connect. 5s timeout elsewhere. - Timeout(5.0, pool=None) # No timeout on acquiring connection from pool. - # 5s timeout elsewhere. - """ - - def __init__( - self, - timeout: typing.Union[TimeoutTypes, UnsetType] = UNSET, - *, - connect: typing.Union[None, float, UnsetType] = UNSET, - read: typing.Union[None, float, UnsetType] = UNSET, - write: typing.Union[None, float, UnsetType] = UNSET, - pool: typing.Union[None, float, UnsetType] = UNSET, - ): - if isinstance(timeout, Timeout): - # Passed as a single explicit Timeout. - assert connect is UNSET - assert read is UNSET - assert write is UNSET - assert pool is UNSET - self.connect = timeout.connect # type: typing.Optional[float] - self.read = timeout.read # type: typing.Optional[float] - self.write = timeout.write # type: typing.Optional[float] - self.pool = timeout.pool # type: typing.Optional[float] - elif isinstance(timeout, tuple): - # Passed as a tuple. - self.connect = timeout[0] - self.read = timeout[1] - self.write = None if len(timeout) < 3 else timeout[2] - self.pool = None if len(timeout) < 4 else timeout[3] - elif not ( - isinstance(connect, UnsetType) - or isinstance(read, UnsetType) - or isinstance(write, UnsetType) - or isinstance(pool, UnsetType) - ): - self.connect = connect - self.read = read - self.write = write - self.pool = pool - else: - if isinstance(timeout, UnsetType): - raise ValueError( - "httpx.Timeout must either include a default, or set all " - "four parameters explicitly." 
- ) - self.connect = timeout if isinstance(connect, UnsetType) else connect - self.read = timeout if isinstance(read, UnsetType) else read - self.write = timeout if isinstance(write, UnsetType) else write - self.pool = timeout if isinstance(pool, UnsetType) else pool - - def as_dict(self) -> typing.Dict[str, typing.Optional[float]]: - return { - "connect": self.connect, - "read": self.read, - "write": self.write, - "pool": self.pool, - } - - def __eq__(self, other: typing.Any) -> bool: - return ( - isinstance(other, self.__class__) - and self.connect == other.connect - and self.read == other.read - and self.write == other.write - and self.pool == other.pool - ) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - if len({self.connect, self.read, self.write, self.pool}) == 1: - return f"{class_name}(timeout={self.connect})" - return ( - f"{class_name}(connect={self.connect}, " - f"read={self.read}, write={self.write}, pool={self.pool})" - ) - - -class Limits: - """ - Configuration for limits to various client behaviors. - - **Parameters:** - - * **max_connections** - The maximum number of concurrent connections that may be - established. - * **max_keepalive_connections** - Allow the connection pool to maintain - keep-alive connections below this point. Should be less than or equal - to `max_connections`. - * **keepalive_expiry** - Time limit on idle keep-alive connections in seconds. - """ - - def __init__( - self, - *, - max_connections: typing.Optional[int] = None, - max_keepalive_connections: typing.Optional[int] = None, - keepalive_expiry: typing.Optional[float] = 5.0, - ): - self.max_connections = max_connections - self.max_keepalive_connections = max_keepalive_connections - self.keepalive_expiry = keepalive_expiry - - def __eq__(self, other: typing.Any) -> bool: - return ( - isinstance(other, self.__class__) - and self.max_connections == other.max_connections - and self.max_keepalive_connections == other.max_keepalive_connections - and self.keepalive_expiry == other.keepalive_expiry - ) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - return ( - f"{class_name}(max_connections={self.max_connections}, " - f"max_keepalive_connections={self.max_keepalive_connections}, " - f"keepalive_expiry={self.keepalive_expiry})" - ) - - -class Proxy: - def __init__( - self, - url: URLTypes, - *, - auth: typing.Optional[typing.Tuple[str, str]] = None, - headers: typing.Optional[HeaderTypes] = None, - ): - url = URL(url) - headers = Headers(headers) - - if url.scheme not in ("http", "https", "socks5"): - raise ValueError(f"Unknown scheme for proxy URL {url!r}") - - if url.username or url.password: - # Remove any auth credentials from the URL. - auth = (url.username, url.password) - url = url.copy_with(username=None, password=None) - - self.url = url - self.auth = auth - self.headers = headers - - @property - def raw_auth(self) -> typing.Optional[typing.Tuple[bytes, bytes]]: - # The proxy authentication as raw bytes. - return ( - None - if self.auth is None - else (self.auth[0].encode("utf-8"), self.auth[1].encode("utf-8")) - ) - - def __repr__(self) -> str: - # The authentication is represented with the password component masked. - auth = (self.auth[0], "********") if self.auth else None - - # Build a nice concise representation. 
- url_str = f"{str(self.url)!r}" - auth_str = f", auth={auth!r}" if auth else "" - headers_str = f", headers={dict(self.headers)!r}" if self.headers else "" - return f"Proxy({url_str}{auth_str}{headers_str})" - - -DEFAULT_TIMEOUT_CONFIG = Timeout(timeout=5.0) -DEFAULT_LIMITS = Limits(max_connections=100, max_keepalive_connections=20) -DEFAULT_MAX_REDIRECTS = 20 diff --git a/spaces/leo-bourrel/test-streamlit/README.md b/spaces/leo-bourrel/test-streamlit/README.md deleted file mode 100644 index 96b0514115ca8b50e14f53c8dcfbb0419b288c4d..0000000000000000000000000000000000000000 --- a/spaces/leo-bourrel/test-streamlit/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Test Streamlit -emoji: 📈 -colorFrom: gray -colorTo: indigo -sdk: docker -app_port: 7860 -sdk_version: 1.27.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lindeberg/whisper-webui/cli.py b/spaces/lindeberg/whisper-webui/cli.py deleted file mode 100644 index 1f75844efed1aa091caafb45b608685b5e40dd4f..0000000000000000000000000000000000000000 --- a/spaces/lindeberg/whisper-webui/cli.py +++ /dev/null @@ -1,118 +0,0 @@ -import argparse -import os -import pathlib -from urllib.parse import urlparse -import warnings -import numpy as np - -import torch -from app import LANGUAGES, WHISPER_MODELS, WhisperTranscriber -from src.download import download_url - -from src.utils import optional_float, optional_int, str2bool -from src.whisperContainer import WhisperContainer - -def cli(): - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe") - parser.add_argument("--model", default="small", choices=WHISPER_MODELS, help="name of the Whisper model to use") - parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default") - parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference") - parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs") - parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages") - - parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") - parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES), help="language spoken in the audio, specify None to perform language detection") - - parser.add_argument("--vad", type=str, default="none", choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], help="The voice activity detection algorithm to use") - parser.add_argument("--vad_merge_window", type=optional_float, default=5, help="The window size (in seconds) to merge voice segments") - parser.add_argument("--vad_max_merge_size", type=optional_float, default=30, help="The maximum size (in seconds) of a voice segment") - parser.add_argument("--vad_padding", type=optional_float, default=1, help="The padding (in seconds) to add to each voice segment") - parser.add_argument("--vad_prompt_window", type=optional_float, default=3, help="The window size of the prompt to pass to Whisper") - parser.add_argument("--vad_cpu_cores", type=int, default=1, 
help="The number of CPU cores to use for VAD pre-processing.") - parser.add_argument("--vad_parallel_devices", type=str, default="", help="A commma delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.") - parser.add_argument("--auto_parallel", type=bool, default=False, help="True to use all available GPUs and CPU cores for processing. Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.") - - parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling") - parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature") - parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero") - parser.add_argument("--patience", type=float, default=None, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search") - parser.add_argument("--length_penalty", type=float, default=None, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple lengt normalization by default") - - parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations") - parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.") - parser.add_argument("--condition_on_previous_text", type=str2bool, default=True, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop") - parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default") - - parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below") - parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed") - parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed") - parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence") - - args = parser.parse_args().__dict__ - model_name: str = args.pop("model") - model_dir: str = args.pop("model_dir") - output_dir: str = args.pop("output_dir") - device: str = args.pop("device") - os.makedirs(output_dir, exist_ok=True) - - if model_name.endswith(".en") and args["language"] not in {"en", "English"}: - warnings.warn(f"{model_name} is an English-only model but receipted '{args['language']}'; using English instead.") - args["language"] = "en" - - temperature = args.pop("temperature") - temperature_increment_on_fallback = args.pop("temperature_increment_on_fallback") - if temperature_increment_on_fallback is not None: - temperature = tuple(np.arange(temperature, 
1.0 + 1e-6, temperature_increment_on_fallback)) - else: - temperature = [temperature] - - vad = args.pop("vad") - vad_merge_window = args.pop("vad_merge_window") - vad_max_merge_size = args.pop("vad_max_merge_size") - vad_padding = args.pop("vad_padding") - vad_prompt_window = args.pop("vad_prompt_window") - vad_cpu_cores = args.pop("vad_cpu_cores") - auto_parallel = args.pop("auto_parallel") - - model = WhisperContainer(model_name, device=device, download_root=model_dir) - transcriber = WhisperTranscriber(delete_uploaded_files=False, vad_cpu_cores=vad_cpu_cores) - transcriber.set_parallel_devices(args.pop("vad_parallel_devices")) - transcriber.set_auto_parallel(auto_parallel) - - if (transcriber._has_parallel_devices()): - print("Using parallel devices:", transcriber.parallel_device_list) - - for audio_path in args.pop("audio"): - sources = [] - - # Detect URL and download the audio - if (uri_validator(audio_path)): - # Download from YouTube/URL directly - for source_path in download_url(audio_path, maxDuration=-1, destinationDirectory=output_dir, playlistItems=None): - source_name = os.path.basename(source_path) - sources.append({ "path": source_path, "name": source_name }) - else: - sources.append({ "path": audio_path, "name": os.path.basename(audio_path) }) - - for source in sources: - source_path = source["path"] - source_name = source["name"] - - result = transcriber.transcribe_file(model, source_path, temperature=temperature, - vad=vad, vadMergeWindow=vad_merge_window, vadMaxMergeSize=vad_max_merge_size, - vadPadding=vad_padding, vadPromptWindow=vad_prompt_window, **args) - - transcriber.write_result(result, source_name, output_dir) - - transcriber.close() - -def uri_validator(x): - try: - result = urlparse(x) - return all([result.scheme, result.netloc]) - except: - return False - -if __name__ == '__main__': - cli() \ No newline at end of file diff --git a/spaces/lora-x/Backpack/README.md b/spaces/lora-x/Backpack/README.md deleted file mode 100644 index d060ebebfd3996af67debba9a4495ebaecc48f38..0000000000000000000000000000000000000000 --- a/spaces/lora-x/Backpack/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Backpack -emoji: 🏃 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/magicr/BuboGPT/bubogpt/common/dist_utils.py b/spaces/magicr/BuboGPT/bubogpt/common/dist_utils.py deleted file mode 100644 index 9280150bf5122d51bb810a9f0258a233e7088647..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/bubogpt/common/dist_utils.py +++ /dev/null @@ -1,137 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import datetime -import functools -import os - -import torch -import torch.distributed as dist -import timm.models.hub as timm_hub - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop("force", False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def init_distributed_mode(args): - if "RANK" in os.environ and "WORLD_SIZE" in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ["WORLD_SIZE"]) - args.gpu = int(os.environ["LOCAL_RANK"]) - elif "SLURM_PROCID" in os.environ: - args.rank = int(os.environ["SLURM_PROCID"]) - args.gpu = args.rank % torch.cuda.device_count() - else: - print("Not using distributed mode") - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = "nccl" - print( - "| distributed init (rank {}, world {}): {}".format( - args.rank, args.world_size, args.dist_url - ), - flush=True, - ) - torch.distributed.init_process_group( - backend=args.dist_backend, - init_method=args.dist_url, - world_size=args.world_size, - rank=args.rank, - timeout=datetime.timedelta( - days=365 - ), # allow auto-downloading and de-compressing - ) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - -def get_dist_info(): - if torch.__version__ < "1.0": - initialized = dist._initialized - else: - initialized = dist.is_initialized() - if initialized: - rank = dist.get_rank() - world_size = dist.get_world_size() - else: # non-distributed training - rank = 0 - world_size = 1 - return rank, world_size - - -def main_process(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - rank, _ = get_dist_info() - if rank == 0: - return func(*args, **kwargs) - - return wrapper - - -def download_cached_file(url, check_hash=True, progress=False): - """ - Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again. - If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded. 
- """ - - def get_cached_file_path(): - # a hack to sync the file path across processes - parts = torch.hub.urlparse(url) - filename = os.path.basename(parts.path) - cached_file = os.path.join(timm_hub.get_cache_dir(), filename) - - return cached_file - - if is_main_process(): - timm_hub.download_cached_file(url, check_hash, progress) - - if is_dist_avail_and_initialized(): - dist.barrier() - - return get_cached_file_path() diff --git a/spaces/matthoffner/starchat-ui/next-i18next.config.js b/spaces/matthoffner/starchat-ui/next-i18next.config.js deleted file mode 100644 index a478a6390ff9716b607da65fc20199228917cdaa..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/next-i18next.config.js +++ /dev/null @@ -1,33 +0,0 @@ -module.exports = { - i18n: { - defaultLocale: 'en', - locales: [ - "bn", - "de", - "en", - "es", - "fr", - "he", - "id", - "it", - "ja", - "ko", - "pl", - "pt", - "ru", - "ro", - "sv", - "te", - "vi", - "zh", - "ar", - "tr", - "ca", - "fi", - ], - }, - localePath: - typeof window === 'undefined' - ? require('path').resolve('./public/locales') - : '/public/locales', -}; diff --git a/spaces/merve/anonymization/public/third_party/d3-scale-chromatic.v1.min.js b/spaces/merve/anonymization/public/third_party/d3-scale-chromatic.v1.min.js deleted file mode 100644 index 90b8e6953cea11cade766bc4f143ecce4bd9edf1..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/public/third_party/d3-scale-chromatic.v1.min.js +++ /dev/null @@ -1,2 +0,0 @@ -// https://d3js.org/d3-scale-chromatic/ v1.5.0 Copyright 2019 Mike Bostock -!function(f,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("d3-interpolate"),require("d3-color")):"function"==typeof define&&define.amd?define(["exports","d3-interpolate","d3-color"],e):e((f=f||self).d3=f.d3||{},f.d3,f.d3)}(this,function(f,e,d){"use strict";function a(f){for(var e=f.length/6|0,d=new Array(e),a=0;a1)&&(f-=Math.floor(f));var e=Math.abs(f-.5);return wf.h=360*f-100,wf.s=1.5-1.5*e,wf.l=.8-.9*e,wf+""},f.interpolateRdBu=x,f.interpolateRdGy=g,f.interpolateRdPu=N,f.interpolateRdYlBu=v,f.interpolateRdYlGn=C,f.interpolateReds=hf,f.interpolateSinebow=function(f){var e;return f=(.5-f)*Math.PI,Af.r=255*(e=Math.sin(f))*e,Af.g=255*(e=Math.sin(f+Pf))*e,Af.b=255*(e=Math.sin(f+Bf))*e,Af+""},f.interpolateSpectral=I,f.interpolateTurbo=function(f){return f=Math.max(0,Math.min(1,f)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+f*(1172.33-f*(10793.56-f*(33300.12-f*(38394.49-14825.05*f)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+f*(557.33+f*(1225.33-f*(3574.96-f*(1073.77+707.56*f)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+f*(3211.1-f*(15327.97-f*(27814-f*(22569.18-6838.66*f)))))))+")"},f.interpolateViridis=xf,f.interpolateWarm=yf,f.interpolateYlGn=Z,f.interpolateYlGnBu=U,f.interpolateYlOrBr=ff,f.interpolateYlOrRd=df,f.schemeAccent=b,f.schemeBlues=af,f.schemeBrBG=u,f.schemeBuGn=L,f.schemeBuPu=q,f.schemeCategory10=c,f.schemeDark2=t,f.schemeGnBu=T,f.schemeGreens=bf,f.schemeGreys=nf,f.schemeOrRd=k,f.schemeOranges=pf,f.schemePRGn=y,f.schemePaired=n,f.schemePastel1=r,f.schemePastel2=o,f.schemePiYG=w,f.schemePuBu=E,f.schemePuBuGn=W,f.schemePuOr=P,f.schemePuRd=H,f.schemePurples=of,f.schemeRdBu=G,f.schemeRdGy=R,f.schemeRdPu=K,f.schemeRdYlBu=Y,f.schemeRdYlGn=O,f.schemeReds=mf,f.schemeSet1=i,f.schemeSet2=l,f.schemeSet3=m,f.schemeSpectral=S,f.schemeTableau10=h,f.schemeYlGn=X,f.schemeYlGnBu=Q,f.schemeYlOrBr=$,f.schemeYlOrRd=ef,Object.defineProperty(f,"__esModule",{value:!0})}); \ No newline at end 
of file diff --git a/spaces/merve/data-leak/server-side/fill-in-the-blank/gender-over-time-colab/watch-files.js b/spaces/merve/data-leak/server-side/fill-in-the-blank/gender-over-time-colab/watch-files.js deleted file mode 100644 index c243ec0c0726b96afe9727d6648fdbc18b4e8ad8..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/server-side/fill-in-the-blank/gender-over-time-colab/watch-files.js +++ /dev/null @@ -1,38 +0,0 @@ -function watchFile(path, type){ - var lastStr = '' - - console.log(path) - function check(){ - d3.text(path + '?' + Math.random(), (err, nextStr) => { - if (err){ - console.log(err) - return check() - } - - if (nextStr == lastStr) return - lastStr = nextStr - - if (path.includes('.js')){ - console.clear() - console.log('js', new Date()) - - Function(nextStr.replace('\n', ';').replace('\n', ';'))() - } - - if (path.includes('.css')){ - console.log('css', new Date()) - - Array.from(document.querySelectorAll('link')) - .filter(d => d.href.includes(path)) - .forEach(d => d.href = d.href.split('?')[0] + '?' + Math.random()) - } - }) - - setTimeout(check, window.timeoutMS || 9999999999) - } - check() -} - - -watchFile('https://roadtolarissa.com/colab/gender-over-time-colab/style.css', 'js') -watchFile('https://roadtolarissa.com/colab/gender-over-time-colab/script.js', 'js') diff --git a/spaces/merve/measuring-fairness/source/measuring-fairness/sel.js b/spaces/merve/measuring-fairness/source/measuring-fairness/sel.js deleted file mode 100644 index 0aefefe517d53ca634ed6e58d6cf8554cc386afa..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/measuring-fairness/sel.js +++ /dev/null @@ -1,151 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - -window.makeSel = function(){ - var s = c.width/(nCols -2) -1 - - var personSel = c.svg.appendMany('g', students) - var rectSel = personSel.append('rect') - .at({ - height: s, - width: s, - x: -s/2, - y: -s/2, - // fillOpacity: .2 - }) - - var textSel = personSel.append('text.weepeople') - .text(d => d.letter) - .at({fontSize: d => d.isMale ? 26 : 34, dy: '.33em', textAnchor: 'middle'}) - .st({stroke: d => d.isSick ? 
dcolors.sick : dcolors.well}) - - addSwoop(c) - - var botAxis = c.svg.append('g').translate(c.width + 150, 1) - var truthAxis = botAxis.append('g.axis').translate([0, 0]) - - truthAxis.append('text').text('Truth') - .at({textAnchor: 'middle', fontWeight: 500, x: s*2.65}) - - truthAxis.append('g').translate([45, 22]) - .append('text').text('Sick').parent() - .append('text.weepeople').text('k') - .at({fontSize: 34, x: 22, y: 5}) - .st({fill: colors.sick}) - - truthAxis.append('g').translate([95, 22]) - .append('text').text('Well').parent() - .append('text.weepeople').text('d') - .at({fontSize: 34, fill: colors.well, x: 22, y: 5}) - .st({fill: colors.well}) - - - var mlAxis = botAxis.append('g.axis').translate([220, 0]) - - mlAxis.append('text').text('ML Prediction') - .at({textAnchor: 'middle', fontWeight: 500, x: s*2.8}) - - mlAxis.append('g').translate([35, 22]) - .append('text').text('Sick').parent() - .append('rect') - .at({width: s*.7, height: s*.7, fill: lcolors.sick, x: 28, y: -17}) - - mlAxis.append('g').translate([100, 22]) - .append('text').text('Well').parent() - .append('rect') - .at({width: s*.7, height: s*.7, fill: lcolors.well, x: 28, y: -17}) - - - - var fpAxis = c.svg.append('g.axis') - - // fpAxis.append('rect') - // .translate(nCols*s - 20, 1) - // .at({ - // fill: lcolors.well, - // x: -82, - // y: -12, - // width: 56, - // height: 28, - // // stroke: '#000', - // }) - - // fpAxis.append('text') - // .translate(nCols*s - 20, 1) - // .tspans(['False', 'Negatives'], 12) - // .at({textAnchor: 'end', x: -s/2 - 10, fill: colors.sick}) - - - // fpAxis.append('text') - // .translate(nCols*s, 0) - // .tspans(['False', 'Positives'], 12) - // .at({textAnchor: 'start', x: s/2 + 7, fill: colors.well}) - - - var sexAxis = c.svg.append('g.axis') - - sexAxis.append('text').st({fontWeight: 500, fill: ''}) - .translate([-15, -30]) - .text('Adults') - - sexAxis.append('text').st({fontWeight: 500, fill: ''}) - .translate([-15, -30 + students.maleOffsetPx]) - .text('Children') - - - var brAxis = c.svg.append('g.axis') - var cpx = 0 - - brAxis.append('path') - .translate([-15, -20]) - .at({ - stroke: colors.sick, - fill: 'none', - d: ['M -3 -3 v', -cpx, 'h', students.fSickCols*students.colWidth, 'v', cpx].join('') - }) - - brAxis.append('path') - .translate([-15, -20 + students.maleOffsetPx]) - .at({ - stroke: colors.sick, - fill: 'none', - d: ['M -3 -3 v', -cpx, 'h', students.mSickCols*students.colWidth, 'v', cpx].join('') - }) - - brAxis.append('text').st({fontWeight: 500, fill: colors.sick}) - .translate([-15, -30]) - .text('Sick Adults') - - brAxis.append('text').st({fontWeight: 500, fill: colors.sick}) - .translate([-15, -30 + students.maleOffsetPx]) - .text('Sick Children') - - - - - return {personSel, textSel, rectSel, fpAxis, sexAxis, brAxis, truthAxis, mlAxis, botAxis} -} - - - - - - - - - - -if (window.init) window.init() diff --git a/spaces/merve/uncertainty-calibration/public/uncertainty-calibration/weatherdata.js b/spaces/merve/uncertainty-calibration/public/uncertainty-calibration/weatherdata.js deleted file mode 100644 index 9fb29abd04cf81496773adb6fbab7a1b9cb513e0..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/uncertainty-calibration/weatherdata.js +++ /dev/null @@ -1,255 +0,0 @@ -var weatherdata = [{'h': 0, -'id': 0, -'label': 0, -'original_score': 0.12433152687398698, -'score': 0.12433152687398698}, -{'h': 1, -'id': 1, -'label': 0, -'original_score': 0.2014203772169771, -'score': 0.2014203772169771}, -{'h': 2, -'id': 2, -'label': 
1, -'original_score': 0.2626685491019668, -'score': 0.2626685491019668}, -{'h': 3, -'id': 3, -'label': 0, -'original_score': 0.10619382887946915, -'score': 0.10619382887946915}, -{'h': 4, -'id': 4, -'label': 0, -'original_score': 0.1536112957212682, -'score': 0.1536112957212682}, -{'h': 5, -'id': 5, -'label': 0, -'original_score': 0.2660219680553572, -'score': 0.2660219680553572}, -{'h': 6, -'id': 6, -'label': 0, -'original_score': 0.1886698681338711, -'score': 0.1886698681338711}, -{'h': 7, -'id': 7, -'label': 0, -'original_score': 0.302266784816097, -'score': 0.302266784816097}, -{'h': 8, -'id': 8, -'label': 0, -'original_score': 0.15496114380196338, -'score': 0.15496114380196338}, -{'h': 9, -'id': 9, -'label': 0, -'original_score': 0.19763504609985533, -'score': 0.19763504609985533}, -{'h': 0, -'id': 10, -'label': 0, -'original_score': 0.38247000184830054, -'score': 0.38247000184830054}, -{'h': 1, -'id': 11, -'label': 1, -'original_score': 0.3363518147573557, -'score': 0.3363518147573557}, -{'h': 2, -'id': 12, -'label': 1, -'original_score': 0.4947967422959128, -'score': 0.4947967422959128}, -{'h': 3, -'id': 13, -'label': 0, -'original_score': 0.38675988136018435, -'score': 0.38675988136018435}, -{'h': 4, -'id': 14, -'label': 0, -'original_score': 0.3755618748258325, -'score': 0.3755618748258325}, -{'h': 5, -'id': 15, -'label': 0, -'original_score': 0.39394252133526547, -'score': 0.39394252133526547}, -{'h': 6, -'id': 16, -'label': 1, -'original_score': 0.47996692559311144, -'score': 0.47996692559311144}, -{'h': 7, -'id': 17, -'label': 0, -'original_score': 0.4520919890835573, -'score': 0.4520919890835573}, -{'h': 8, -'id': 18, -'label': 0, -'original_score': 0.49128398887598235, -'score': 0.49128398887598235}, -{'h': 9, -'id': 19, -'label': 0, -'original_score': 0.4934231460040127, -'score': 0.4934231460040127}, -{'h': 0, -'id': 20, -'label': 1, -'original_score': 0.6023370616966761, -'score': 0.6023370616966761}, -{'h': 1, -'id': 21, -'label': 0, -'original_score': 0.5588319919664324, -'score': 0.5588319919664324}, -{'h': 2, -'id': 22, -'label': 1, -'original_score': 0.5372993269470902, -'score': 0.5372993269470902}, -{'h': 3, -'id': 23, -'label': 1, -'original_score': 0.6056881032306126, -'score': 0.6056881032306126}, -{'h': 4, -'id': 24, -'label': 1, -'original_score': 0.5777333354677878, -'score': 0.5777333354677878}, -{'h': 5, -'id': 25, -'label': 0, -'original_score': 0.5684077659316352, -'score': 0.5684077659316352}, -{'h': 6, -'id': 26, -'label': 0, -'original_score': 0.5583886351009575, -'score': 0.5583886351009575}, -{'h': 7, -'id': 27, -'label': 0, -'original_score': 0.585107016245853, -'score': 0.585107016245853}, -{'h': 4, -'id': 28, -'label': 0, -'original_score': 0.5024398267017434, -'score': 0.5024398267017434}, -{'h': 7, -'id': 29, -'label': 1, -'original_score': 0.5119051369645927, -'score': 0.5119051369645927}, -{'h': 0, -'id': 30, -'label': 1, -'original_score': 0.6874421886689279, -'score': 0.6874421886689279}, -{'h': 1, -'id': 31, -'label': 1, -'original_score': 0.7622939478182656, -'score': 0.7622939478182656}, -{'h': 2, -'id': 32, -'label': 1, -'original_score': 0.8240376576917314, -'score': 0.8240376576917314}, -{'h': 3, -'id': 33, -'label': 0, -'original_score': 0.8491598185092843, -'score': 0.8491598185092843}, -{'h': 4, -'id': 34, -'label': 1, -'original_score': 0.7585879921321647, -'score': 0.7585879921321647}, -{'h': 5, -'id': 35, -'label': 0, -'original_score': 0.76396242565466, -'score': 0.76396242565466}, -{'h': 6, -'id': 36, -'label': 1, 
-'original_score': 0.7498984213509621, -'score': 0.7498984213509621}, -{'h': 7, -'id': 37, -'label': 1, -'original_score': 0.6642342379293016, -'score': 0.6642342379293016}, -{'h': 8, -'id': 38, -'label': 0, -'original_score': 0.7594027841393808, -'score': 0.7594027841393808}, -{'h': 9, -'id': 39, -'label': 1, -'original_score': 0.816737760918518, -'score': 0.816737760918518}, -{'h': 0, -'id': 40, -'label': 1, -'original_score': 0.8926172493334218, -'score': 0.8926172493334218}, -{'h': 1, -'id': 41, -'label': 0, -'original_score': 0.9194132577983325, -'score': 0.9194132577983325}, -{'h': 2, -'id': 42, -'label': 1, -'original_score': 0.8603862951854552, -'score': 0.8603862951854552}, -{'h': 3, -'id': 43, -'label': 1, -'original_score': 0.9093601089110575, -'score': 0.9093601089110575}, -{'h': 4, -'id': 44, -'label': 1, -'original_score': 0.9442430043437404, -'score': 0.9442430043437404}, -{'h': 5, -'id': 45, -'label': 1, -'original_score': 0.8778942613680896, -'score': 0.8778942613680896}, -{'h': 6, -'id': 46, -'label': 1, -'original_score': 0.8873305075007553, -'score': 0.8873305075007553}, -{'h': 7, -'id': 47, -'label': 1, -'original_score': 0.8786043110234295, -'score': 0.8786043110234295}, -{'h': 8, -'id': 48, -'label': 1, -'original_score': 0.8682870444345626, -'score': 0.8682870444345626}, -{'h': 9, -'id': 49, -'label': 1, -'original_score': 0.8698959578262738, -'score': 0.8698959578262738}] - - -weatherdata.forEach(d => { - d.is_filter = d.label && Math.random() < .6 -}) \ No newline at end of file diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/generate_figures.py b/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/generate_figures.py deleted file mode 100644 index 45b68b86146198c701a66fb8ba7a363d901d6951..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/generate_figures.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Minimal script for reproducing the figures of the StyleGAN paper using pre-trained generators.""" - -import os -import pickle -import numpy as np -import PIL.Image -import dnnlib -import dnnlib.tflib as tflib -import config - -#---------------------------------------------------------------------------- -# Helpers for loading and using pre-trained generators. 
- -url_ffhq = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl -url_celebahq = 'https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf' # karras2019stylegan-celebahq-1024x1024.pkl -url_bedrooms = 'https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF' # karras2019stylegan-bedrooms-256x256.pkl -url_cars = 'https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3' # karras2019stylegan-cars-512x384.pkl -url_cats = 'https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ' # karras2019stylegan-cats-256x256.pkl - -synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8) - -_Gs_cache = dict() - -def load_Gs(url): - if url not in _Gs_cache: - with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: - _G, _D, Gs = pickle.load(f) - _Gs_cache[url] = Gs - return _Gs_cache[url] - -#---------------------------------------------------------------------------- -# Figures 2, 3, 10, 11, 12: Multi-resolution grid of uncurated result images. - -def draw_uncurated_result_figure(png, Gs, cx, cy, cw, ch, rows, lods, seed): - print(png) - latents = np.random.RandomState(seed).randn(sum(rows * 2**lod for lod in lods), Gs.input_shape[1]) - images = Gs.run(latents, None, **synthesis_kwargs) # [seed, y, x, rgb] - - canvas = PIL.Image.new('RGB', (sum(cw // 2**lod for lod in lods), ch * rows), 'white') - image_iter = iter(list(images)) - for col, lod in enumerate(lods): - for row in range(rows * 2**lod): - image = PIL.Image.fromarray(next(image_iter), 'RGB') - image = image.crop((cx, cy, cx + cw, cy + ch)) - image = image.resize((cw // 2**lod, ch // 2**lod), PIL.Image.ANTIALIAS) - canvas.paste(image, (sum(cw // 2**lod for lod in lods[:col]), row * ch // 2**lod)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 3: Style mixing. - -def draw_style_mixing_figure(png, Gs, w, h, src_seeds, dst_seeds, style_ranges): - print(png) - src_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds) - dst_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in dst_seeds) - src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component] - dst_dlatents = Gs.components.mapping.run(dst_latents, None) # [seed, layer, component] - src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs) - dst_images = Gs.components.synthesis.run(dst_dlatents, randomize_noise=False, **synthesis_kwargs) - - canvas = PIL.Image.new('RGB', (w * (len(src_seeds) + 1), h * (len(dst_seeds) + 1)), 'white') - for col, src_image in enumerate(list(src_images)): - canvas.paste(PIL.Image.fromarray(src_image, 'RGB'), ((col + 1) * w, 0)) - for row, dst_image in enumerate(list(dst_images)): - canvas.paste(PIL.Image.fromarray(dst_image, 'RGB'), (0, (row + 1) * h)) - row_dlatents = np.stack([dst_dlatents[row]] * len(src_seeds)) - row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]] - row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) - for col, image in enumerate(list(row_images)): - canvas.paste(PIL.Image.fromarray(image, 'RGB'), ((col + 1) * w, (row + 1) * h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 4: Noise detail. 
- -def draw_noise_detail_figure(png, Gs, w, h, num_samples, seeds): - print(png) - canvas = PIL.Image.new('RGB', (w * 3, h * len(seeds)), 'white') - for row, seed in enumerate(seeds): - latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1])] * num_samples) - images = Gs.run(latents, None, truncation_psi=1, **synthesis_kwargs) - canvas.paste(PIL.Image.fromarray(images[0], 'RGB'), (0, row * h)) - for i in range(4): - crop = PIL.Image.fromarray(images[i + 1], 'RGB') - crop = crop.crop((650, 180, 906, 436)) - crop = crop.resize((w//2, h//2), PIL.Image.NEAREST) - canvas.paste(crop, (w + (i%2) * w//2, row * h + (i//2) * h//2)) - diff = np.std(np.mean(images, axis=3), axis=0) * 4 - diff = np.clip(diff + 0.5, 0, 255).astype(np.uint8) - canvas.paste(PIL.Image.fromarray(diff, 'L'), (w * 2, row * h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 5: Noise components. - -def draw_noise_components_figure(png, Gs, w, h, seeds, noise_ranges, flips): - print(png) - Gsc = Gs.clone() - noise_vars = [var for name, var in Gsc.components.synthesis.vars.items() if name.startswith('noise')] - noise_pairs = list(zip(noise_vars, tflib.run(noise_vars))) # [(var, val), ...] - latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) - all_images = [] - for noise_range in noise_ranges: - tflib.set_vars({var: val * (1 if i in noise_range else 0) for i, (var, val) in enumerate(noise_pairs)}) - range_images = Gsc.run(latents, None, truncation_psi=1, randomize_noise=False, **synthesis_kwargs) - range_images[flips, :, :] = range_images[flips, :, ::-1] - all_images.append(list(range_images)) - - canvas = PIL.Image.new('RGB', (w * 2, h * 2), 'white') - for col, col_images in enumerate(zip(*all_images)): - canvas.paste(PIL.Image.fromarray(col_images[0], 'RGB').crop((0, 0, w//2, h)), (col * w, 0)) - canvas.paste(PIL.Image.fromarray(col_images[1], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, 0)) - canvas.paste(PIL.Image.fromarray(col_images[2], 'RGB').crop((0, 0, w//2, h)), (col * w, h)) - canvas.paste(PIL.Image.fromarray(col_images[3], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 8: Truncation trick. - -def draw_truncation_trick_figure(png, Gs, w, h, seeds, psis): - print(png) - latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) - dlatents = Gs.components.mapping.run(latents, None) # [seed, layer, component] - dlatent_avg = Gs.get_var('dlatent_avg') # [component] - - canvas = PIL.Image.new('RGB', (w * len(psis), h * len(seeds)), 'white') - for row, dlatent in enumerate(list(dlatents)): - row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(psis, [-1, 1, 1]) + dlatent_avg - row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) - for col, image in enumerate(list(row_images)): - canvas.paste(PIL.Image.fromarray(image, 'RGB'), (col * w, row * h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Main program. 
- -def main(): - tflib.init_tf() - os.makedirs(config.result_dir, exist_ok=True) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure02-uncurated-ffhq.png'), load_Gs(url_ffhq), cx=0, cy=0, cw=1024, ch=1024, rows=3, lods=[0,1,2,2,3,3], seed=5) - draw_style_mixing_figure(os.path.join(config.result_dir, 'figure03-style-mixing.png'), load_Gs(url_ffhq), w=1024, h=1024, src_seeds=[639,701,687,615,2268], dst_seeds=[888,829,1898,1733,1614,845], style_ranges=[range(0,4)]*3+[range(4,8)]*2+[range(8,18)]) - draw_noise_detail_figure(os.path.join(config.result_dir, 'figure04-noise-detail.png'), load_Gs(url_ffhq), w=1024, h=1024, num_samples=100, seeds=[1157,1012]) - draw_noise_components_figure(os.path.join(config.result_dir, 'figure05-noise-components.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[1967,1555], noise_ranges=[range(0, 18), range(0, 0), range(8, 18), range(0, 8)], flips=[1]) - draw_truncation_trick_figure(os.path.join(config.result_dir, 'figure08-truncation-trick.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[91,388], psis=[1, 0.7, 0.5, 0, -0.5, -1]) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure10-uncurated-bedrooms.png'), load_Gs(url_bedrooms), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=0) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure11-uncurated-cars.png'), load_Gs(url_cars), cx=0, cy=64, cw=512, ch=384, rows=4, lods=[0,1,2,2,3,3], seed=2) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure12-uncurated-cats.png'), load_Gs(url_cats), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=1) - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - main() - -#---------------------------------------------------------------------------- diff --git a/spaces/michaelthwan/digest-everything-gpt/SHOWCASE.md b/spaces/michaelthwan/digest-everything-gpt/SHOWCASE.md deleted file mode 100644 index 6442ef1279ac28a7928bcd0be3b392ad5788f714..0000000000000000000000000000000000000000 --- a/spaces/michaelthwan/digest-everything-gpt/SHOWCASE.md +++ /dev/null @@ -1,15 +0,0 @@ -# Showcase of DigestEverythingGPT - -## Youtube - -It will show both timestamp summary and overall summary. - -Some may think that DigestEverythingGPT's output is too long, but timestamp summary and overall summary is preferred for different audiences. 
- -Therefore users can extract / read what they prefer to read and use - -| DigestEverythingGPT | Similar product | -|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ️️️V️ideo: OpenAI CEO Sam Altman on the Future of AI
      📝Timestamped summary
      0:04 - OpenAI CEO Sam Altman shares insights on AI excitement worldwide
      1:40 - Promoting social responsibility with AI development is crucial
      4:13 - Managing risks and regulations for safe AI implementation
      6:16 - OpenAI’s partnership with Microsoft and concerns about control
      8:28 - Elon Musk’s concerns and differing opinions on AI safety
      12:10 - Safeguarding against bias in AI models and user control
      15:53 - Altman’s financial incentives and motivations for OpenAI
      17:26 - Embracing the potential and impact of AI technology
      19:33 - Altman’s perspective on AI advancements and collaboration
      22:05 - Trust in OpenAI should extend beyond a single person


      📝Summary
      Summary: OpenAI CEO Sam Altman discusses his recent experiences and the future of AI, highlighting the need for thoughtful regulation and the potential benefits of AI for education, healthcare, and scientific progress.

      Highlights:
      - 🌍 Surprising global excitement and optimism about AI, as well as the desire for cooperative action to mitigate risks.
      - 🤝 Changes in AI development based on feedback from developers and the need to customize tools to reflect different values and cultures.
      - ⚠️ Recognizing the importance of AI safety and the need to mitigate the risks of powerful technology being used in dangerous ways.
      - 💡 The potential for AI to bring about widespread improvements in quality education, medical care, and scientific progress.
      - 🤝 Calling for global regulation to ensure the safe development and use of AI, while acknowledging the challenges of finding the right balance.
      | Summary
      OpenAI CEO Sam Altman talks about his recent trips and the global excitement and anxiety surrounding AI. He discusses the risks and benefits of AI development, the importance of global cooperation, and his support for regulation of AI.

      Highlights
      Altman traveled to Rio and Tokyo and observed intense excitement and optimism about AI, with a simultaneous desire for thoughtful discussions on how to drive social progress.
      Altman acknowledges the risks of AI but believes that they can be mitigated through good safety practices and global regulation.
      Altman supports a certification system for AI and believes it can avoid regulatory capture, but small startups should not be overregulated. | -| Summary:
      A YouTube video showcasing 10 desk setup accessories worth buying, including a handheld air duster, charging dock, Logitech Sona microphone, BenQ screen bar, Insta 360 webcam, Ugreen digien s Cube, Logitech MX Keys keyboard, Logitech MX Master mouse, fantic NEX screwdriver, braided USB-C cables, grovemate organizer tray, and insulated water bottles.

      Items mentioned in the video:
      - 💨 Handheld air duster
      - 🔌 Charging dock
      - 🎙️ Logitech Sona microphone
      - 💡 BenQ screen bar
      - 📷 Insta 360 webcam
      - 🧊 Ugreen digien s Cube
      - ⌨️ Logitech MX Keys keyboard
      - 🖱️ Logitech MX Master mouse
      - 🔧 Fantic NEX screwdriver
      - 📱 Braided USB-C cables
      - 🗄️ Grovemate organizer tray
      - 🚰 Insulated water bottles

      ⚡by DigestEverythingGPT | Summary
      This video showcases ten desk setup accessories that are worth buying, including a handheld air duster, a charging dock, a Logitech Sona microphone, a BenQ screen bar, an Insta 360 Link webcam, a Ugreen DigiNess Cube extension lead, Logitech MX Keys and Mouse, a Fantic Nex Screwdriver, and Braided USBC Cables.

      Highlights
      Handheld air duster makes keeping the desk clean much easier.
      Ugreen charging dock with fast charging capability keeps the desk tidy and is hidden from view.
      Logitech Sona microphone with clear amp technology and the best quality webcam is Insta 360 Link. | -| 🎞️Video: How AI Powers Self-Driving Tesla with Elon Musk and Andrej Karpathy
      📝Timestamped summary
      0:00 - Neural networks have been trained for years, but only recently used in industry.
      0:30 - Andrej Karpathy taught computer vision at Stanford.
      1:03 - Neural networks process videos from eight cameras in Teslas.
      2:04 - Neural networks convert grid of pixels into high-level concepts.
      3:36 - Neural networks require large, varied, and real data sets.
      5:07 - Neural networks need thousands of examples to learn.
      6:38 - The training process for neural networks takes hours or days.
      8:12 - Tesla’s data engine engine improves network accuracy over time.
      10:17 - Simulations can’t match the complexity of real-world driving.
      13:22 - Fleet learning improves object detection and path prediction.
      15:24 - Data collected in shadow mode is used to refine networks.
      17:28 - Fleet learning improved Tesla’s ability to detect cut-ins.
      19:30 - Path prediction is shaped by fleets’ human drivers.
      21:04 - Neural networks can predict paths beyond the line of sight.
      22:35 - Depth perception in self-driving cars can be achieved with vision.
      24:36 - Video data can be used for 3D environment reconstruction.
      26:39 - Neural networks can learn depth from sensor annotations.
      27:39 - Self-supervision techniques can train neural networks for depth perception.
      28:43 - Visual recognition is crucial for autonomy in self-driving cars.


      📝Summary
      Summary: The talk discusses the training of neural networks for self-driving cars, the importance of large and real datasets, and the power of visual recognition. It emphasizes the superiority of vision over lidar and showcases different approaches to depth perception using vision alone.

      Highlights:
      - 💡 Neural networks require large, varied, and real datasets to work effectively for self-driving.
      - 💡 The fleet of Tesla cars provides the necessary data for training and improving neural networks.
      - 💡 Visual recognition is crucial for autonomy as it enables understanding of the environment.
      - 💡 Lidar is a shortcut that fails to address the fundamental problem of visual recognition.
      - 💡 Depth perception can be achieved through techniques like multi-view stereo, sensor annotation, and self-supervision.


      ⚡by DigestEverythingGPT | Summary
      This video features Tesla's Director of AI, Andrej Karpathy, explaining the use of neural networks in Tesla's self-driving cars. He discusses data collection, neural network training, and visual recognition.

      Highlights
      🚗 In Tesla's self-driving cars, neural networks process video data from multiple cameras to make predictions about lane markings, other objects, and drivable space.
      🧠 Neural networks require a lot of data and start from scratch to make predictions, requiring millions of labeled examples.
      📈 The use of a fleet allows for more targeted and diverse data collection to improve the accuracy of neural network predictions. | \ No newline at end of file diff --git a/spaces/milai-tk/clip-human-action-img2txt/app.py b/spaces/milai-tk/clip-human-action-img2txt/app.py deleted file mode 100644 index c17d544110be9223313de81b16d58dec332b44e7..0000000000000000000000000000000000000000 --- a/spaces/milai-tk/clip-human-action-img2txt/app.py +++ /dev/null @@ -1,57 +0,0 @@ -import clip -import gradio as gr -import numpy as np -import simple_chalk as chalk -import torch -from googletrans import Translator -from PIL import Image - -TOP_N = 5 - - -def match_texts(in_img: Image) -> list: - - """モデル準備""" - device = "cuda" if torch.cuda.is_available() else "cpu" - model, preprocess = clip.load("ViT-B/32", device=device) - - """ テキスト前処理 """ - translator = Translator() - trans_dict = {} - with open("./sentences_ja.txt") as f: - for ja_sentence in f: - en_sentence = translator.translate(ja_sentence, dest="en", src="ja").text - trans_dict[en_sentence] = ja_sentence - en_sentences = list(trans_dict.keys()) - texts = clip.tokenize(en_sentences).to(device) - - """ 画像前処理 """ - # image: Tensor (3, 224, 224) -> (1, 3, 224, 224) - image = preprocess(in_img).unsqueeze(0).to(device) - - """ CLIP モデルで処理 """ - with torch.no_grad(): - logits_per_image, logits_per_text = model(image, texts) - probs = logits_per_image.softmax(dim=-1).cpu().numpy() - probs_per_image = probs.reshape(-1) - sort_index = np.argsort(probs_per_image)[::-1] - - """ 処理結果(テキスト)出力 """ - idxs = sort_index.tolist() - # 英語出力 - # confidences = {en_sentences[i]: float(probs_per_image[i]) for i in idxs} - # 日本語変換出力 - confidences = {trans_dict[en_sentences[i]]: float(probs_per_image[i]) for i in idxs} - return confidences - - -if __name__ == "__main__": - inputs = gr.Image(type="pil", label="画像を入力") - outputs = gr.Label(num_top_classes=TOP_N, label=f"一致したテキスト Top-{TOP_N}") - gr.Interface( - fn=match_texts, - inputs=inputs, - outputs=outputs, - examples=["examples-01.jpg", "examples-02.jpg", "examples-03.jpg"], - allow_flagging="never", - ).launch(share=False) diff --git a/spaces/mileslilly/City-classifier/README.md b/spaces/mileslilly/City-classifier/README.md deleted file mode 100644 index 69b44cf0d49f8c42add48a26e4d70c8e0eb5d79e..0000000000000000000000000000000000000000 --- a/spaces/mileslilly/City-classifier/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: City Classifier -emoji: 🏆 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/phi/m.d.ts b/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/phi/m.d.ts deleted file mode 100644 index fe14a0811eca165ed4ef0a0e923b2e7a6930b533..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/phi/m.d.ts +++ /dev/null @@ -1,84 +0,0 @@ -/* tslint:disable */ -/* eslint-disable */ -/** - */ -export class Model { - free(): void; - /** - * @param {Uint8Array} weights - * @param {Uint8Array} tokenizer - * @param {boolean} quantized - */ - constructor(weights: Uint8Array, tokenizer: Uint8Array, quantized: boolean); - /** - * @param {string} prompt - * @param {number} temp - * @param {number} top_p - * @param {number} repeat_penalty - * @param {number} repeat_last_n - * @param {bigint} seed - * @returns {string} - 
*/ - init_with_prompt( - prompt: string, - temp: number, - top_p: number, - repeat_penalty: number, - repeat_last_n: number, - seed: bigint - ): string; - /** - * @returns {string} - */ - next_token(): string; -} - -export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module; - -export interface InitOutput { - readonly memory: WebAssembly.Memory; - readonly __wbg_model_free: (a: number) => void; - readonly model_load: (a: number, b: number, c: number, d: number, e: number, f: number) => void; - readonly model_init_with_prompt: ( - a: number, - b: number, - c: number, - d: number, - e: number, - f: number, - g: number, - h: number, - i: number - ) => void; - readonly model_next_token: (a: number, b: number) => void; - readonly main: (a: number, b: number) => number; - readonly __wbindgen_add_to_stack_pointer: (a: number) => number; - readonly __wbindgen_malloc: (a: number, b: number) => number; - readonly __wbindgen_realloc: (a: number, b: number, c: number, d: number) => number; - readonly __wbindgen_free: (a: number, b: number, c: number) => void; - readonly __wbindgen_exn_store: (a: number) => void; - readonly __wbindgen_start: () => void; -} - -export type SyncInitInput = BufferSource | WebAssembly.Module; -/** - * Instantiates the given `module`, which can either be bytes or - * a precompiled `WebAssembly.Module`. - * - * @param {SyncInitInput} module - * - * @returns {InitOutput} - */ -export function initSync(module: SyncInitInput): InitOutput; - -/** - * If `module_or_path` is {RequestInfo} or {URL}, makes a request and - * for everything else, calls `WebAssembly.instantiate` directly. - * - * @param {InitInput | Promise} module_or_path - * - * @returns {Promise} - */ -export default function __wbg_init( - module_or_path?: InitInput | Promise -): Promise; diff --git a/spaces/miyaaa666/bingo/src/pages/api/create.ts b/spaces/miyaaa666/bingo/src/pages/api/create.ts deleted file mode 100644 index 430bb2d53431e6a2c7608234f512f2d9f577daee..0000000000000000000000000000000000000000 --- a/spaces/miyaaa666/bingo/src/pages/api/create.ts +++ /dev/null @@ -1,31 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch, debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' - -const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create' -// const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const headers = createHeaders(req.cookies) - - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - - debug('headers', headers) - const response = await fetch(API_ENDPOINT, { method: 'GET', headers }) - .then((res) => res.text()) - - res.end(response) - } catch (e) { - return res.end(JSON.stringify({ - result: { - value: 'UnauthorizedRequest', - message: `${e}` - } - })) - } -} diff --git a/spaces/mlpc-lab/BLIVA/bliva/processors/clip_processors.py b/spaces/mlpc-lab/BLIVA/bliva/processors/clip_processors.py deleted file mode 100644 index 9b467184aa0856e9ca8404dd4363e8f7854d06cc..0000000000000000000000000000000000000000 --- a/spaces/mlpc-lab/BLIVA/bliva/processors/clip_processors.py +++ /dev/null @@ -1,92 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from bliva.common.registry import registry -from bliva.processors.blip_processors import BlipImageBaseProcessor -from omegaconf import OmegaConf -from torchvision import transforms -from torchvision.transforms.functional import InterpolationMode - - -def _convert_to_rgb(image): - return image.convert("RGB") - - -@registry.register_processor("clip_image_train") -class ClipImageTrainProcessor(BlipImageBaseProcessor): - def __init__( - self, image_size=224, mean=None, std=None, min_scale=0.9, max_scale=1.0 - ): - - super().__init__(mean=mean, std=std) - - self.transform = transforms.Compose( - [ - transforms.RandomResizedCrop( - image_size, - scale=(min_scale, max_scale), - interpolation=InterpolationMode.BICUBIC, - ), - _convert_to_rgb, - transforms.ToTensor(), - self.normalize, - ] - ) - - @classmethod - def from_config(cls, cfg=None): - if cfg is None: - cfg = OmegaConf.create() - - image_size = cfg.get("image_size", 224) - - mean = cfg.get("mean", None) - std = cfg.get("std", None) - - min_scale = cfg.get("min_scale", 0.9) - max_scale = cfg.get("max_scale", 1.0) - - return cls( - image_size=image_size, - mean=mean, - std=std, - min_scale=min_scale, - max_scale=max_scale, - ) - - -@registry.register_processor("clip_image_eval") -class ClipImageEvalProcessor(BlipImageBaseProcessor): - def __init__(self, image_size=224, mean=None, std=None): - - super().__init__(mean=mean, std=std) - - self.transform = transforms.Compose( - [ - transforms.Resize(image_size, interpolation=InterpolationMode.BICUBIC), - transforms.CenterCrop(image_size), - _convert_to_rgb, - transforms.ToTensor(), - self.normalize, - ] - ) - - @classmethod - def from_config(cls, cfg=None): - if cfg is None: - cfg = OmegaConf.create() - - image_size = cfg.get("image_size", 224) - - mean = cfg.get("mean", None) - std = cfg.get("std", None) - - return cls( - image_size=image_size, - mean=mean, - std=std, - ) diff --git a/spaces/monra/freegpt-webui/g4f/Provider/Providers/Bard.py b/spaces/monra/freegpt-webui/g4f/Provider/Providers/Bard.py deleted file mode 100644 index 4c37c4b719430031fce41ce49946f0e6ac93d155..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui/g4f/Provider/Providers/Bard.py +++ /dev/null @@ -1,74 +0,0 @@ -import os, requests, json, browser_cookie3, re, random -from ...typing import sha256, Dict, get_type_hints - -url = 'https://bard.google.com' -model = ['Palm2'] -supports_stream = False -needs_auth = True - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome( - domain_name='.google.com')}['__Secure-1PSID'] - - formatted = '\n'.join([ - '%s: %s' % (message['role'], message['content']) for message in messages - ]) - prompt = f'{formatted}\nAssistant:' - - proxy = kwargs.get('proxy', False) - if proxy == False: - print('warning!, you did not give a proxy, a lot of countries are banned from Google Bard, so it may not work') - - snlm0e = None - conversation_id = None - response_id = None - choice_id = None - - client = requests.Session() - client.proxies = { - 'http': f'http://{proxy}', - 'https': f'http://{proxy}'} if proxy else None - - client.headers = { - 'authority': 'bard.google.com', - 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8', - 'origin': 'https://bard.google.com', - 'referer': 'https://bard.google.com/', - 
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', - 'x-same-domain': '1', - 'cookie': f'__Secure-1PSID={psid}' - } - - snlm0e = re.search(r'SNlM0e\":\"(.*?)\"', - client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e - - params = { - 'bl': 'boq_assistant-bard-web-server_20230326.21_p0', - '_reqid': random.randint(1111, 9999), - 'rt': 'c' - } - - data = { - 'at': snlm0e, - 'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])} - - intents = '.'.join([ - 'assistant', - 'lamda', - 'BardFrontendService' - ]) - - response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate', - data=data, params=params) - - chat_data = json.loads(response.content.splitlines()[3])[0][2] - if chat_data: - json_chat_data = json.loads(chat_data) - - yield json_chat_data[0][0] - - else: - yield 'error' - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/morganreese8/rhymethyme/README.md b/spaces/morganreese8/rhymethyme/README.md deleted file mode 100644 index 97b78130b07d0bdd57da04c18576377d18c2b32d..0000000000000000000000000000000000000000 --- a/spaces/morganreese8/rhymethyme/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: RHYME THYME -emoji: 🥧 -colorFrom: blue -colorTo: indigo -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false -license: openrail -duplicated_from: hamza50/rhymethyme ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/adaptive_softmax.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/adaptive_softmax.py deleted file mode 100644 index ae0c77ba0f6ee98501306d66cbc4a948b4ade0f7..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/adaptive_softmax.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import functools -import operator - -import torch -import torch.nn.functional as F -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.quant_noise import quant_noise -from torch import nn - - -class TiedLinear(nn.Module): - def __init__(self, weight, transpose): - super().__init__() - self.weight = weight - self.transpose = transpose - - def forward(self, input): - return F.linear(input, self.weight.t() if self.transpose else self.weight) - - -class TiedHeadModule(nn.Module): - def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size): - super().__init__() - tied_emb, _ = weights - self.num_words, emb_dim = tied_emb.size() - - self.word_proj = quant_noise( - TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size - ) - if input_dim != emb_dim: - self.word_proj = nn.Sequential( - quant_noise( - nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size - ), - self.word_proj, - ) - - self.class_proj = quant_noise( - nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size - ) - self.out_dim = self.num_words + num_classes - - self.register_buffer("_float_tensor", torch.FloatTensor(1)) - - def forward(self, input): - inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1) - out = self._float_tensor.new(inp_sz, self.out_dim) - out[:, : self.num_words] = self.word_proj(input.view(inp_sz, -1)) - out[:, self.num_words :] = self.class_proj(input.view(inp_sz, -1)) - return out - - -class AdaptiveSoftmax(nn.Module): - """ - This is an implementation of the efficient softmax approximation for - graphical processing units (GPU), described in the paper "Efficient softmax - approximation for GPUs" (http://arxiv.org/abs/1609.04309). - """ - - def __init__( - self, - vocab_size, - input_dim, - cutoff, - dropout, - factor=4.0, - adaptive_inputs=None, - tie_proj=False, - q_noise=0, - qn_block_size=8, - ): - super().__init__() - - if vocab_size > cutoff[-1]: - cutoff = cutoff + [vocab_size] - else: - assert ( - vocab_size == cutoff[-1] - ), "cannot specify cutoff larger than vocab size" - - output_dim = cutoff[0] + len(cutoff) - 1 - - self.vocab_size = vocab_size - self.cutoff = cutoff - self.dropout_module = FairseqDropout( - dropout, module_name=self.__class__.__name__ - ) - self.input_dim = input_dim - self.factor = factor - self.q_noise = q_noise - self.qn_block_size = qn_block_size - - self.lsm = nn.LogSoftmax(dim=1) - - if adaptive_inputs is not None: - self.head = TiedHeadModule( - adaptive_inputs.weights_for_band(0), - input_dim, - len(cutoff) - 1, - self.q_noise, - self.qn_block_size, - ) - else: - self.head = quant_noise( - nn.Linear(input_dim, output_dim, bias=False), - self.q_noise, - self.qn_block_size, - ) - - self._make_tail(adaptive_inputs, tie_proj) - - def init_weights(m): - if ( - hasattr(m, "weight") - and not isinstance(m, TiedLinear) - and not isinstance(m, TiedHeadModule) - ): - nn.init.xavier_uniform_(m.weight) - - self.apply(init_weights) - - self.register_buffer("version", torch.LongTensor([1])) - - def _make_tail(self, adaptive_inputs=None, tie_proj=False): - self.tail = nn.ModuleList() - for i in range(len(self.cutoff) - 1): - dim = int(self.input_dim // self.factor ** (i + 1)) - - tied_emb, tied_proj = ( - adaptive_inputs.weights_for_band(i + 1) - if adaptive_inputs is not None - else (None, None) - ) - - if tied_proj is not None: - if tie_proj: - proj = quant_noise( - TiedLinear(tied_proj, transpose=True), - self.q_noise, - self.qn_block_size, - ) - else: - proj = quant_noise( - 
nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), - self.q_noise, - self.qn_block_size, - ) - else: - proj = quant_noise( - nn.Linear(self.input_dim, dim, bias=False), - self.q_noise, - self.qn_block_size, - ) - - if tied_emb is None: - out_proj = nn.Linear( - dim, self.cutoff[i + 1] - self.cutoff[i], bias=False - ) - else: - out_proj = TiedLinear(tied_emb, transpose=False) - - m = nn.Sequential( - proj, - nn.Dropout(self.dropout_module.p), - quant_noise(out_proj, self.q_noise, self.qn_block_size), - ) - - self.tail.append(m) - - def upgrade_state_dict_named(self, state_dict, name): - version_name = name + ".version" - if version_name not in state_dict: - raise Exception("This version of the model is no longer supported") - - def adapt_target(self, target): - """ - In order to be efficient, the AdaptiveSoftMax does not compute the - scores for all the word of the vocabulary for all the examples. It is - thus necessary to call the method adapt_target of the AdaptiveSoftMax - layer inside each forward pass. - """ - - target = target.view(-1) - new_target = [target.clone()] - target_idxs = [] - - for i in range(len(self.cutoff) - 1): - mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) - new_target[0][mask] = self.cutoff[0] + i - - if mask.any(): - target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1)) - new_target.append(target[mask].add(-self.cutoff[i])) - else: - target_idxs.append(None) - new_target.append(None) - - return new_target, target_idxs - - def forward(self, input, target): - """ - Args: - input: (b x t x d) - target: (b x t) - Returns: - 2 lists: output for each cutoff section and new targets by cut off - """ - - input = input.contiguous().view(-1, input.size(-1)) - input = self.dropout_module(input) - - new_target, target_idxs = self.adapt_target(target) - output = [self.head(input)] - - for i in range(len(target_idxs)): - if target_idxs[i] is not None: - output.append(self.tail[i](input.index_select(0, target_idxs[i]))) - else: - output.append(None) - - return output, new_target - - def get_log_prob(self, input, target): - """ - Computes the log probabilities for all the words of the vocabulary, - given a 2D tensor of hidden vectors. 
- """ - - bsz, length, dim = input.size() - input = input.contiguous().view(-1, dim) - - if target is not None: - _, target_idxs = self.adapt_target(target) - else: - target_idxs = None - - head_y = self.head(input) - log_probs = head_y.new_zeros(input.size(0), self.vocab_size) - - head_sz = self.cutoff[0] + len(self.tail) - log_probs[:, :head_sz] = self.lsm(head_y) - tail_priors = log_probs[:, self.cutoff[0] : head_sz].clone() - - for i in range(len(self.tail)): - start = self.cutoff[i] - end = self.cutoff[i + 1] - - if target_idxs is None: - tail_out = log_probs[:, start:end] - tail_out.copy_(self.tail[i](input)) - log_probs[:, start:end] = self.lsm(tail_out).add_( - tail_priors[:, i, None] - ) - elif target_idxs[i] is not None: - idxs = target_idxs[i] - tail_out = log_probs[idxs, start:end] - tail_out.copy_(self.tail[i](input[idxs])) - log_probs[idxs, start:end] = self.lsm(tail_out).add_( - tail_priors[idxs, i, None] - ) - - log_probs = log_probs.view(bsz, length, -1) - return log_probs diff --git a/spaces/mshukor/UnIVAL/run_scripts/averaging/fusing/scaling_best/unival_caption_stage_1_initavg_caprefsnlivqa.sh b/spaces/mshukor/UnIVAL/run_scripts/averaging/fusing/scaling_best/unival_caption_stage_1_initavg_caprefsnlivqa.sh deleted file mode 100644 index d21719355d820a84c10a41b26aa32d39004ed155..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/run_scripts/averaging/fusing/scaling_best/unival_caption_stage_1_initavg_caprefsnlivqa.sh +++ /dev/null @@ -1,209 +0,0 @@ - - -# Number of GPUs per GPU worker -export GPUS_PER_NODE=8 -# Number of GPU workers, for single-worker training, please set to 1 -export NUM_NODES=$SLURM_NNODES -# The ip address of the rank-0 worker, for single-worker training, please set to localhost -master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) -export MASTER_ADDR=$master_addr - -# The port for communication -export MASTER_PORT=12350 -# The rank of this worker, should be in {0, ..., WORKER_CNT-1}, for single-worker training, please set to 0 -export RANK=$SLURM_NODEID - -echo "MASTER_ADDR: $MASTER_ADDR" -echo "RANK :$RANK" -echo "NUM_NODES :$NUM_NODES" -echo "GPUS_PER_NODE :$GPUS_PER_NODE" - -export MIOPEN_USER_DB_PATH=/lus/home/NAT/gda2204/mshukor/.config/miopen_${MASTER_ADDR}_${SLURM_PROCID}/ - -echo "MIOPEN_USER_DB_PATH :$MIOPEN_USER_DB_PATH" - -num_workers=0 - - - -exp_name=unival_caption_stage_1_initavg_caprefsnlivqa - - - -ofa_dir=/lus/home/NAT/gda2204/mshukor/code/unival_ours -base_data_dir=/lus/scratch/NAT/gda2204/SHARED/data -base_log_dir=/work/NAT/gda2204/mshukor/logs - -save_base_log_dir=/lus/scratch/NAT/gda2204/SHARED/logs -save_dir=${save_base_log_dir}/ofa/checkpoints/caption/${exp_name} - -# save_dir=${base_log_dir}/ofa/checkpoints/caption/${exp_name} -log_dir=${save_dir} - -mkdir -p $log_dir $save_dir - -bpe_dir=${ofa_dir}/utils/BPE -user_dir=${ofa_dir}/ofa_module - - - -image_dir=${base_data_dir} - - -data_dir=${base_data_dir}/ofa/caption_data -# data=${data_dir}/caption_stage1_train.tsv,${data_dir}/caption_val.tsv - -# Note: If you have shuffled the data in advance, please uncomment the line below. 
-data=${data_dir}/caption_stage1_train_1.tsv,${data_dir}/caption_stage1_train_2.tsv,${data_dir}/caption_stage1_train_3.tsv,${data_dir}/caption_stage1_train_4.tsv,${data_dir}/caption_stage1_train_5.tsv,${data_dir}/caption_stage1_train_6.tsv,${data_dir}/caption_stage1_train_7.tsv,${data_dir}/caption_stage1_train_8.tsv,${data_dir}/caption_stage1_train_9.tsv,${data_dir}/caption_stage1_train_10.tsv,${data_dir}/caption_val.tsv - - -eval_cider_cached=${data_dir}/cider_cached_tokens/coco-valid-words.p - - -restore_file=/lus/scratch/NAT/gda2204/SHARED/logs/ofa/pretrained_models/average_models/avg_caprefsnlivqa.pt - -lr=1e-5 - - - - -selected_cols=0,4,2 - -task=caption -arch=unival_base -pretrained_model= - - -criterion=adjust_label_smoothed_encouraging_loss -label_smoothing=0.1 - -max_epoch=10 -warmup_ratio=0.06 -batch_size=16 -update_freq=1 -resnet_drop_path_rate=0.0 -encoder_drop_path_rate=0.1 -decoder_drop_path_rate=0.1 -dropout=0.1 -attention_dropout=0.0 -max_src_length=80 -max_tgt_length=20 -num_bins=1000 -# patch_image_size=480 -drop_worst_ratio=0.2 - - -### -image_encoder_name=timm_resnet #vit_base_patch16_224 timm_resnet resnet -patch_image_size=480 -resnet_type=resnet101 - -resnet_model_path=${base_log_dir}/pretrained_models/resnet101-5d3b4d8f.pth - -# video -video_encoder_name=all_resnext101 -patch_frame_size=384 -video_model_path=${base_log_dir}/pretrained_models/3dcnn/resnext-101-kinetics.pth #${base_log_dir}/pretrained_models/TimeSformer_divST_8x32_224_K600.pyth -num_frames=4 - -save_interval=1 -validate_interval_updates=2000 -save_interval_updates=0 - - -sample_patch_num='--sample-patch-num=784' # '' - -eval_args='--eval-args={"beam":5,"stop_on_max_len":true,"max_len_b":22,"no_repeat_ngram_size":3}' - - -drop_worst_ratio=0.05 # modified from 0.2 for el -drop_best_ratio=0.05 -drop_best_after=6000 -log_end=0.75 # for el -# log_end=1. 
# for el - -for max_epoch in {$max_epoch,}; do - echo "max_epoch "${max_epoch} - for warmup_ratio in {0.06,}; do - echo "warmup_ratio "${warmup_ratio} - for drop_worst_after in {6000,}; do - echo "drop_worst_after "${drop_worst_after} - - log_file=${log_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after}".log" - save_path=${save_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after} - mkdir -p $save_path - - python3 -m torch.distributed.launch \ - --nnodes=${NUM_NODES} \ - --nproc_per_node=${GPUS_PER_NODE} \ - --master_port=${MASTER_PORT} \ - --node_rank=${RANK} \ - --master_addr=${MASTER_ADDR} \ - --use_env ${ofa_dir}/train.py \ - $data \ - --selected-cols=${selected_cols} \ - --bpe-dir=${bpe_dir} \ - --user-dir=${user_dir} \ - --restore-file=${restore_file} \ - --save-dir=${save_path} \ - --task=${task} \ - --arch=${arch} \ - --criterion=${criterion} \ - --label-smoothing=${label_smoothing} \ - --batch-size=${batch_size} \ - --update-freq=${update_freq} \ - --encoder-normalize-before \ - --decoder-normalize-before \ - --share-decoder-input-output-embed \ - --share-all-embeddings \ - --layernorm-embedding \ - --patch-layernorm-embedding \ - --code-layernorm-embedding \ - --resnet-drop-path-rate=${resnet_drop_path_rate} \ - --encoder-drop-path-rate=${encoder_drop_path_rate} \ - --decoder-drop-path-rate=${decoder_drop_path_rate} \ - --dropout=${dropout} \ - --attention-dropout=${attention_dropout} \ - --weight-decay=0.01 --optimizer=adam --adam-betas="(0.9,0.999)" --adam-eps=1e-08 --clip-norm=1.0 \ - --lr-scheduler=polynomial_decay --lr=${lr} \ - --max-epoch=${max_epoch} --warmup-ratio=${warmup_ratio} \ - --log-format=simple --log-interval=10 \ - --fixed-validation-seed=7 \ - --no-epoch-checkpoints --keep-best-checkpoints=1 \ - --save-interval=${save_interval} --validate-interval=1 \ - --save-interval-updates=${save_interval_updates} --validate-interval-updates=${validate_interval_updates} \ - --eval-cider \ - --eval-cider-cached-tokens=${eval_cider_cached} \ - --eval-args='{"beam":5,"max_len_b":16,"no_repeat_ngram_size":3}' \ - --best-checkpoint-metric=cider --maximize-best-checkpoint-metric \ - --max-src-length=${max_src_length} \ - --max-tgt-length=${max_tgt_length} \ - --find-unused-parameters \ - --freeze-encoder-embedding \ - --freeze-decoder-embedding \ - --add-type-embedding \ - --scale-attn \ - --scale-fc \ - --scale-heads \ - --disable-entangle \ - --num-bins=${num_bins} \ - --patch-image-size=${patch_image_size} \ - --drop-worst-ratio=${drop_worst_ratio} \ - --drop-worst-after=${drop_worst_after} \ - --fp16 \ - --fp16-scale-window=512 \ - --num-workers=0 \ - --image-encoder-name=${image_encoder_name} \ - --image-dir=${image_dir} \ - --video-encoder-name=${video_encoder_name} \ - --video-model-path=${video_model_path} \ - --patch-frame-size=${patch_frame_size} \ - ${sample_patch_num} \ - ${eval_args} \ - --reset-dataloader --reset-meters --reset-optimizer \ - --log-end ${log_end} --drop-best-ratio ${drop_best_ratio} --drop-best-after ${drop_best_after} - - - done - done -done \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/run_scripts/averaging/ratatouille/scaling_best/caption/unival_caption_stage_1_initrefcocoplus.sh b/spaces/mshukor/UnIVAL/run_scripts/averaging/ratatouille/scaling_best/caption/unival_caption_stage_1_initrefcocoplus.sh deleted file mode 100644 index aee946353fb152bf6155fd70580065caa0318249..0000000000000000000000000000000000000000 --- 
a/spaces/mshukor/UnIVAL/run_scripts/averaging/ratatouille/scaling_best/caption/unival_caption_stage_1_initrefcocoplus.sh +++ /dev/null @@ -1,210 +0,0 @@ - - -# Number of GPUs per GPU worker -export GPUS_PER_NODE=8 -# Number of GPU workers, for single-worker training, please set to 1 -export NUM_NODES=$SLURM_NNODES -# The ip address of the rank-0 worker, for single-worker training, please set to localhost -master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) -export MASTER_ADDR=$master_addr - -# The port for communication -export MASTER_PORT=12350 -# The rank of this worker, should be in {0, ..., WORKER_CNT-1}, for single-worker training, please set to 0 -export RANK=$SLURM_NODEID - -echo "MASTER_ADDR: $MASTER_ADDR" -echo "RANK :$RANK" -echo "NUM_NODES :$NUM_NODES" -echo "GPUS_PER_NODE :$GPUS_PER_NODE" - -export MIOPEN_USER_DB_PATH=/lus/home/NAT/gda2204/mshukor/.config/miopen_${MASTER_ADDR}_${SLURM_PROCID}/ - -echo "MIOPEN_USER_DB_PATH :$MIOPEN_USER_DB_PATH" - -num_workers=0 - - - -exp_name=unival_caption_stage_1_initrefcocoplus - - - -ofa_dir=/lus/home/NAT/gda2204/mshukor/code/unival -base_data_dir=/lus/scratch/NAT/gda2204/SHARED/data -base_log_dir=/work/NAT/gda2204/mshukor/logs - -save_base_log_dir=/lus/scratch/NAT/gda2204/SHARED/logs -save_dir=${save_base_log_dir}/ofa/checkpoints/caption/${exp_name} - -# save_dir=${base_log_dir}/ofa/checkpoints/caption/${exp_name} -log_dir=${save_dir} - -mkdir -p $log_dir $save_dir - -bpe_dir=${ofa_dir}/utils/BPE -user_dir=${ofa_dir}/ofa_module - - - -image_dir=${base_data_dir} - - -data_dir=${base_data_dir}/ofa/caption_data -# data=${data_dir}/caption_stage1_train.tsv,${data_dir}/caption_val.tsv - -# Note: If you have shuffled the data in advance, please uncomment the line below. -data=${data_dir}/caption_stage1_train_1.tsv,${data_dir}/caption_stage1_train_2.tsv,${data_dir}/caption_stage1_train_3.tsv,${data_dir}/caption_stage1_train_4.tsv,${data_dir}/caption_stage1_train_5.tsv,${data_dir}/caption_stage1_train_6.tsv,${data_dir}/caption_stage1_train_7.tsv,${data_dir}/caption_stage1_train_8.tsv,${data_dir}/caption_stage1_train_9.tsv,${data_dir}/caption_stage1_train_10.tsv,${data_dir}/caption_val.tsv - - -eval_cider_cached=${data_dir}/cider_cached_tokens/coco-valid-words.p - - -restore_file=/lus/scratch/NAT/gda2204/SHARED/logs/ofa/checkpoints/refcocoplus/unival_refcocoplus/10_5e-5_512/checkpoint_best.pt - -lr=1e-5 - - -# ${base_log_dir}/ofa/checkpoints/caption/${exp_name}/10_0.06_6000/checkpoint_last.pt - - -selected_cols=0,4,2 - -task=caption -arch=unival_base -pretrained_model= - - -criterion=adjust_label_smoothed_encouraging_loss -label_smoothing=0.1 - -max_epoch=10 -warmup_ratio=0.06 -batch_size=16 -update_freq=1 -resnet_drop_path_rate=0.0 -encoder_drop_path_rate=0.1 -decoder_drop_path_rate=0.1 -dropout=0.1 -attention_dropout=0.0 -max_src_length=80 -max_tgt_length=20 -num_bins=1000 -# patch_image_size=480 -drop_worst_ratio=0.2 - - -### -image_encoder_name=timm_resnet #vit_base_patch16_224 timm_resnet resnet -patch_image_size=480 -resnet_type=resnet101 - -resnet_model_path=${base_log_dir}/pretrained_models/resnet101-5d3b4d8f.pth - -# video -video_encoder_name=all_resnext101 -patch_frame_size=384 -video_model_path=${base_log_dir}/pretrained_models/3dcnn/resnext-101-kinetics.pth #${base_log_dir}/pretrained_models/TimeSformer_divST_8x32_224_K600.pyth -num_frames=4 - -save_interval=1 -validate_interval_updates=2000 -save_interval_updates=0 - - -sample_patch_num='--sample-patch-num=784' # '' - 
-eval_args='--eval-args={"beam":5,"stop_on_max_len":true,"max_len_b":22,"no_repeat_ngram_size":3}' - - -drop_worst_ratio=0.05 # modified from 0.2 for el -drop_best_ratio=0.05 -drop_best_after=6000 -log_end=0.75 # for el -# log_end=1. # for el - -for max_epoch in {$max_epoch,}; do - echo "max_epoch "${max_epoch} - for warmup_ratio in {0.06,}; do - echo "warmup_ratio "${warmup_ratio} - for drop_worst_after in {6000,}; do - echo "drop_worst_after "${drop_worst_after} - - log_file=${log_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after}".log" - save_path=${save_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after} - mkdir -p $save_path - - python3 -m torch.distributed.launch \ - --nnodes=${NUM_NODES} \ - --nproc_per_node=${GPUS_PER_NODE} \ - --master_port=${MASTER_PORT} \ - --node_rank=${RANK} \ - --master_addr=${MASTER_ADDR} \ - --use_env ${ofa_dir}/train.py \ - $data \ - --selected-cols=${selected_cols} \ - --bpe-dir=${bpe_dir} \ - --user-dir=${user_dir} \ - --restore-file=${restore_file} \ - --save-dir=${save_path} \ - --task=${task} \ - --arch=${arch} \ - --criterion=${criterion} \ - --label-smoothing=${label_smoothing} \ - --batch-size=${batch_size} \ - --update-freq=${update_freq} \ - --encoder-normalize-before \ - --decoder-normalize-before \ - --share-decoder-input-output-embed \ - --share-all-embeddings \ - --layernorm-embedding \ - --patch-layernorm-embedding \ - --code-layernorm-embedding \ - --resnet-drop-path-rate=${resnet_drop_path_rate} \ - --encoder-drop-path-rate=${encoder_drop_path_rate} \ - --decoder-drop-path-rate=${decoder_drop_path_rate} \ - --dropout=${dropout} \ - --attention-dropout=${attention_dropout} \ - --weight-decay=0.01 --optimizer=adam --adam-betas="(0.9,0.999)" --adam-eps=1e-08 --clip-norm=1.0 \ - --lr-scheduler=polynomial_decay --lr=${lr} \ - --max-epoch=${max_epoch} --warmup-ratio=${warmup_ratio} \ - --log-format=simple --log-interval=10 \ - --fixed-validation-seed=7 \ - --no-epoch-checkpoints --keep-best-checkpoints=1 \ - --save-interval=${save_interval} --validate-interval=1 \ - --save-interval-updates=${save_interval_updates} --validate-interval-updates=${validate_interval_updates} \ - --eval-cider \ - --eval-cider-cached-tokens=${eval_cider_cached} \ - --eval-args='{"beam":5,"max_len_b":16,"no_repeat_ngram_size":3}' \ - --best-checkpoint-metric=cider --maximize-best-checkpoint-metric \ - --max-src-length=${max_src_length} \ - --max-tgt-length=${max_tgt_length} \ - --find-unused-parameters \ - --freeze-encoder-embedding \ - --freeze-decoder-embedding \ - --add-type-embedding \ - --scale-attn \ - --scale-fc \ - --scale-heads \ - --disable-entangle \ - --num-bins=${num_bins} \ - --patch-image-size=${patch_image_size} \ - --drop-worst-ratio=${drop_worst_ratio} \ - --drop-worst-after=${drop_worst_after} \ - --fp16 \ - --fp16-scale-window=512 \ - --num-workers=0 \ - --image-encoder-name=${image_encoder_name} \ - --image-dir=${image_dir} \ - --video-encoder-name=${video_encoder_name} \ - --video-model-path=${video_model_path} \ - --patch-frame-size=${patch_frame_size} \ - ${sample_patch_num} \ - ${eval_args} \ - --reset-dataloader --reset-meters --reset-optimizer \ - --log-end ${log_end} --drop-best-ratio ${drop_best_ratio} --drop-best-after ${drop_best_after} \ - - - done - done -done \ No newline at end of file diff --git a/spaces/mukish45/potato-disease-classification/README.md b/spaces/mukish45/potato-disease-classification/README.md deleted file mode 100644 index a91134489c32c6212ffcd21dfc2effdfac2af4c9..0000000000000000000000000000000000000000 
--- a/spaces/mukish45/potato-disease-classification/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Potato Disease Classification -emoji: 📊 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/naqibhakimi/sk/lib/vis-9.1.2/vis-network.css b/spaces/naqibhakimi/sk/lib/vis-9.1.2/vis-network.css deleted file mode 100644 index 8b03219bb397b69eb8a4e1ef1cee18bf0f8f8916..0000000000000000000000000000000000000000 --- a/spaces/naqibhakimi/sk/lib/vis-9.1.2/vis-network.css +++ /dev/null @@ -1 +0,0 @@ -.vis-overlay{bottom:0;left:0;position:absolute;right:0;top:0;z-index:10}.vis-active{box-shadow:0 0 10px #86d5f8}.vis [class*=span]{min-height:0;width:auto}div.vis-color-picker{background-color:#fff;border-radius:15px;box-shadow:0 0 10px 0 rgba(0,0,0,.5);display:none;height:444px;left:30px;margin-left:30px;margin-top:-140px;padding:10px;position:absolute;top:0;width:310px;z-index:1}div.vis-color-picker div.vis-arrow{left:5px;position:absolute;top:147px}div.vis-color-picker div.vis-arrow:after,div.vis-color-picker div.vis-arrow:before{border:solid transparent;content:" ";height:0;pointer-events:none;position:absolute;right:100%;top:50%;width:0}div.vis-color-picker div.vis-arrow:after{border-color:hsla(0,0%,100%,0) #fff hsla(0,0%,100%,0) hsla(0,0%,100%,0);border-width:30px;margin-top:-30px}div.vis-color-picker div.vis-color{cursor:pointer;height:289px;position:absolute;width:289px}div.vis-color-picker div.vis-brightness{position:absolute;top:313px}div.vis-color-picker div.vis-opacity{position:absolute;top:350px}div.vis-color-picker div.vis-selector{background:#4c4c4c;background:-moz-linear-gradient(top,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313 100%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#4c4c4c),color-stop(12%,#595959),color-stop(25%,#666),color-stop(39%,#474747),color-stop(50%,#2c2c2c),color-stop(51%,#000),color-stop(60%,#111),color-stop(76%,#2b2b2b),color-stop(91%,#1c1c1c),color-stop(100%,#131313));background:-webkit-linear-gradient(top,#4c4c4c,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313);background:-o-linear-gradient(top,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313 100%);background:-ms-linear-gradient(top,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313 100%);background:linear-gradient(180deg,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313);border:1px solid #fff;border-radius:15px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#4c4c4c",endColorstr="#131313",GradientType=0);height:15px;left:137px;position:absolute;top:137px;width:15px}div.vis-color-picker div.vis-new-color{left:159px;padding-right:2px;text-align:right}div.vis-color-picker div.vis-initial-color,div.vis-color-picker div.vis-new-color{border:1px solid rgba(0,0,0,.1);border-radius:5px;color:rgba(0,0,0,.4);font-size:10px;height:20px;line-height:20px;position:absolute;top:380px;vertical-align:middle;width:140px}div.vis-color-picker div.vis-initial-color{left:10px;padding-left:2px;text-align:left}div.vis-color-picker div.vis-label{left:10px;position:absolute;width:300px}div.vis-color-picker 
div.vis-label.vis-brightness{top:300px}div.vis-color-picker div.vis-label.vis-opacity{top:338px}div.vis-color-picker div.vis-button{background-color:#f7f7f7;border:2px solid #d9d9d9;border-radius:10px;cursor:pointer;height:25px;line-height:25px;position:absolute;text-align:center;top:410px;vertical-align:middle;width:68px}div.vis-color-picker div.vis-button.vis-cancel{left:5px}div.vis-color-picker div.vis-button.vis-load{left:82px}div.vis-color-picker div.vis-button.vis-apply{left:159px}div.vis-color-picker div.vis-button.vis-save{left:236px}div.vis-color-picker input.vis-range{height:20px;width:290px}div.vis-configuration{display:block;float:left;font-size:12px;position:relative}div.vis-configuration-wrapper{display:block;width:700px}div.vis-configuration-wrapper:after{clear:both;content:"";display:block}div.vis-configuration.vis-config-option-container{background-color:#fff;border:2px solid #f7f8fa;border-radius:4px;display:block;left:10px;margin-top:20px;padding-left:5px;width:495px}div.vis-configuration.vis-config-button{background-color:#f7f8fa;border:2px solid #ceced0;border-radius:4px;cursor:pointer;display:block;height:25px;left:10px;line-height:25px;margin-bottom:30px;margin-top:20px;padding-left:5px;vertical-align:middle;width:495px}div.vis-configuration.vis-config-button.hover{background-color:#4588e6;border:2px solid #214373;color:#fff}div.vis-configuration.vis-config-item{display:block;float:left;height:25px;line-height:25px;vertical-align:middle;width:495px}div.vis-configuration.vis-config-item.vis-config-s2{background-color:#f7f8fa;border-radius:3px;left:10px;padding-left:5px}div.vis-configuration.vis-config-item.vis-config-s3{background-color:#e4e9f0;border-radius:3px;left:20px;padding-left:5px}div.vis-configuration.vis-config-item.vis-config-s4{background-color:#cfd8e6;border-radius:3px;left:30px;padding-left:5px}div.vis-configuration.vis-config-header{font-size:18px;font-weight:700}div.vis-configuration.vis-config-label{height:25px;line-height:25px;width:120px}div.vis-configuration.vis-config-label.vis-config-s3{width:110px}div.vis-configuration.vis-config-label.vis-config-s4{width:100px}div.vis-configuration.vis-config-colorBlock{border:1px solid #444;border-radius:2px;cursor:pointer;height:19px;margin:0;padding:0;top:1px;width:30px}input.vis-configuration.vis-config-checkbox{left:-5px}input.vis-configuration.vis-config-rangeinput{margin:0;padding:1px;pointer-events:none;position:relative;top:-5px;width:60px}input.vis-configuration.vis-config-range{-webkit-appearance:none;background-color:transparent;border:0 solid #fff;height:20px;width:300px}input.vis-configuration.vis-config-range::-webkit-slider-runnable-track{background:#dedede;background:-moz-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#dedede),color-stop(99%,#c8c8c8));background:-webkit-linear-gradient(top,#dedede,#c8c8c8 99%);background:-o-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-ms-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:linear-gradient(180deg,#dedede 0,#c8c8c8 99%);border:1px solid #999;border-radius:3px;box-shadow:0 0 3px 0 #aaa;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#dedede",endColorstr="#c8c8c8",GradientType=0);height:5px;width:300px}input.vis-configuration.vis-config-range::-webkit-slider-thumb{-webkit-appearance:none;background:#3876c2;background:-moz-linear-gradient(top,#3876c2 0,#385380 100%);background:-webkit-gradient(linear,left top,left 
bottom,color-stop(0,#3876c2),color-stop(100%,#385380));background:-webkit-linear-gradient(top,#3876c2,#385380);background:-o-linear-gradient(top,#3876c2 0,#385380 100%);background:-ms-linear-gradient(top,#3876c2 0,#385380 100%);background:linear-gradient(180deg,#3876c2 0,#385380);border:1px solid #14334b;border-radius:50%;box-shadow:0 0 1px 0 #111927;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#3876c2",endColorstr="#385380",GradientType=0);height:17px;margin-top:-7px;width:17px}input.vis-configuration.vis-config-range:focus{outline:none}input.vis-configuration.vis-config-range:focus::-webkit-slider-runnable-track{background:#9d9d9d;background:-moz-linear-gradient(top,#9d9d9d 0,#c8c8c8 99%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#9d9d9d),color-stop(99%,#c8c8c8));background:-webkit-linear-gradient(top,#9d9d9d,#c8c8c8 99%);background:-o-linear-gradient(top,#9d9d9d 0,#c8c8c8 99%);background:-ms-linear-gradient(top,#9d9d9d 0,#c8c8c8 99%);background:linear-gradient(180deg,#9d9d9d 0,#c8c8c8 99%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#9d9d9d",endColorstr="#c8c8c8",GradientType=0)}input.vis-configuration.vis-config-range::-moz-range-track{background:#dedede;background:-moz-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#dedede),color-stop(99%,#c8c8c8));background:-webkit-linear-gradient(top,#dedede,#c8c8c8 99%);background:-o-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-ms-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:linear-gradient(180deg,#dedede 0,#c8c8c8 99%);border:1px solid #999;border-radius:3px;box-shadow:0 0 3px 0 #aaa;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#dedede",endColorstr="#c8c8c8",GradientType=0);height:10px;width:300px}input.vis-configuration.vis-config-range::-moz-range-thumb{background:#385380;border:none;border-radius:50%;height:16px;width:16px}input.vis-configuration.vis-config-range:-moz-focusring{outline:1px solid #fff;outline-offset:-1px}input.vis-configuration.vis-config-range::-ms-track{background:transparent;border-color:transparent;border-width:6px 0;color:transparent;height:5px;width:300px}input.vis-configuration.vis-config-range::-ms-fill-lower{background:#777;border-radius:10px}input.vis-configuration.vis-config-range::-ms-fill-upper{background:#ddd;border-radius:10px}input.vis-configuration.vis-config-range::-ms-thumb{background:#385380;border:none;border-radius:50%;height:16px;width:16px}input.vis-configuration.vis-config-range:focus::-ms-fill-lower{background:#888}input.vis-configuration.vis-config-range:focus::-ms-fill-upper{background:#ccc}.vis-configuration-popup{background:rgba(57,76,89,.85);border:2px solid #f2faff;border-radius:4px;color:#fff;font-size:14px;height:30px;line-height:30px;position:absolute;text-align:center;-webkit-transition:opacity .3s ease-in-out;-moz-transition:opacity .3s ease-in-out;transition:opacity .3s ease-in-out;width:150px}.vis-configuration-popup:after,.vis-configuration-popup:before{border:solid transparent;content:" ";height:0;left:100%;pointer-events:none;position:absolute;top:50%;width:0}.vis-configuration-popup:after{border-color:rgba(136,183,213,0) rgba(136,183,213,0) rgba(136,183,213,0) rgba(57,76,89,.85);border-width:8px;margin-top:-8px}.vis-configuration-popup:before{border-color:rgba(194,225,245,0) rgba(194,225,245,0) rgba(194,225,245,0) #f2faff;border-width:12px;margin-top:-12px}div.vis-tooltip{background-color:#f5f4ed;border:1px solid 
#808074;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;box-shadow:3px 3px 10px rgba(0,0,0,.2);color:#000;font-family:verdana;font-size:14px;padding:5px;pointer-events:none;position:absolute;visibility:hidden;white-space:nowrap;z-index:5}div.vis-network div.vis-navigation div.vis-button{-webkit-touch-callout:none;background-position:2px 2px;background-repeat:no-repeat;-moz-border-radius:17px;border-radius:17px;cursor:pointer;display:inline-block;height:34px;position:absolute;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;width:34px}div.vis-network div.vis-navigation div.vis-button:hover{box-shadow:0 0 3px 3px rgba(56,207,21,.3)}div.vis-network div.vis-navigation div.vis-button:active{box-shadow:0 0 1px 3px rgba(56,207,21,.95)}div.vis-network div.vis-navigation div.vis-button.vis-up{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6iAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8
kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAABphJREFUeNqcV2twU9cR/nbPlVTHxpKRbNnBLyEbPyJisLEcPwgwUMKQtjNJAzNJZkgNNJOmJaZAaDKlxaXDTIBAcJtOOzSYKSkdiimhAdIMjyT4bYgBYxA2BgcUQPLrCiGDR4qt2x+yXTASFt1/957d7zt3z3d39xDCMQWUfgAz/RI/T4pSTAJpAGL8rECAXX7QFQGq9wOHOxYO1oCgjAdJj1wtB095Giv9TFuZAIWHAziATMPhTAwiHgUkYPXFJu92lMP/2MTpB1AKUCVEgNAcleUo1M+2F8TO6crSTncb1QleAOj2OTSX3Ge1p+Va42m5JrnzbnsCE8Ov+EHgpa0LPLvCJjZ/whuIlN8wAcXG+e1LUn9hm238QU84p1Ld83nsXvuO7Lq+LzKYGAT6/dn58m/HJTYf4O3EShkT8Irpzab1Uz9sGevT5+tWn+j6NB4A5hp/5NSr43xjfd5rW5tT9e3OAhCBiCua5/WsDEls/hdvYklZSwDefmrT8eXmtzuDkb5YZ33p9ndylICAVjWxf39xw/5g5Luv/9H84ZWNcwNEypZT87rXjqyJB85UYDMJYN3U7UdLJ6/6JlgqV517teRqf9uTlug8e1zEk27HgD22o98WsTBh8fWxvjm6ApdONbGvse8LM5NUPOm1Cfabuz3nACAgxX0QEFTJAnjNvLJ+Sepb14KRHnN+Ev+1XJOhZs3Qu1mbG97J2NQgsXroa1dtxrGuf8cHi1mUtPTay0lv1DMJSCRVLtoX+FgGgDQNysBAcez89l9nbbsQSji7rlXkEhjPxb/QatHOcFu0M9zz419oFSRhj/3PuaHiyqasv1Con9NGxHAYUsoCxAqImbYSgCWmFbZQwdsur7N0eC4m6tT6/jUZ750Zeb82c+OZGLWh/2p/W+Kfrmy0hIp/aVKpTSIJEqu2QgFx2iE8CwDp0RbH7Ljng/4yXr+XT3QdyhYsodS0slGr0g2OrEUK7eCrKW82SqzCVz3/yfb6vRwM4xn9rN7JkRkOQRLmfJn2LBPxQjDBqp9lD7XbX7X8pKTP160zR2bdeiX5jYeU/nLSTztNkem3XL5eXbltRUkonBxdgZ2IIUmahUxERQSCVT+rK5hzQ89xQ6P8VaaK1f5VmRvqQ4G+lba+nlnlb5brMhvlk7FBiaPzuwQEmEQhg5BOxMjWTncHc2501cQLkjDTsMCWpyuRQxFP0xXIJfp5FyVW4Zy7KajC06ItbiIGg6ZITBxDxIgbrr1jTSM0fibGIHz8O9sKK0GAibEua9spANh4aY2VmcEg+DEkiBgR/L2hYFgGtcErkQQAMVJgBxyy9hboZzv32v+Kpr7qbEECTAIMAoaJa3qPTmNiiAAgJAjk6J5xhu6HDAIgQYGLmI29PocmMcI8MNYvT1ckfzD9H/ub5br4e4Me9WfOKqtyX6Ud2cwC449PRamifDm6Auc0rTXokci+Xo1EAgBckiDuYGLjpTvntcGIA+SFcp6uUAaAI879VhWrRteYAqn/edq758brXJ1327QMhgJcZjA3EBjNrgZjOG1PkAjyTGENMjZPq5ECQ0MDE9ERBqFZrk0OJ3i4x/7vyIjBxGERt3takgVJEAp9xq3f769WiPDNvSsJdT3HDOEASPelmoBRYT3Kzt5uMtwauJEgSOCpwrk1DIJCoNUMwj9v7MweP9XSQ8/hJPp496fZTAICvLqcyv2B7nRbrgCA03JN5h8ub7A8VqpB437xHvsOy3l3cyaB4L2uqxhti1WLMcSgZQCw7+bOooO3Pk4JBZIYYXISMV5sKH59UePM10GESRGpIf/bE92HU452HywSJIGIllctrhp6YAK5+fHds0lLtJFMXNwkV6fFqA29mROefqiMJj1h6um4a5vY/92dKGaBxIhU5zJTWW2cJmEgGOmeb3c8FxAfb9mdf2RzyGGv5MvU7QwuEySwKHFp/c/M71zA/2F7b1RajnYdLAqMukMVu2YcfmDYE2MD7H+7/Xlq6cRIJqm4zXM+qd3TGjVBir43KSLlXjiELe5TsX+3/yW/ST45PaAHbKmccWh12AP93JNZywj0kSABIobpiXRHjtZ6faout2tyZMadGLXBCxBcvl6NfaAz+tKdFmObpzWl2+tIIBACYy0t/yj34M7HvsKUK+CGassvicX7alYDwwq+vykIEqPVa+Q9gdYk5+V+UE7lj3+FGbuBM/X5JUT8QwIVSSSZiTgmoFR2MfiqYFFPfjpkyrfWPopwxP47AP1pK1g9/dqeAAAAAElFTkSuQmCC");bottom:50px;left:55px}div.vis-network div.vis-navigation 
div.vis-button.vis-down{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6iAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89
kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAABpdJREFUeNqcV21QlNcVfp5zX9ikoAvLEsAIIgsoHwpqWAQUNKLNaNv8iZ1JMkNG6/Qj/dDUyCSTtCHpmEkwVk3TToZRMjXj5MOG2KidjIkxQYSAQUAtX6IgIN8su8KCoOzbH4sk4q5g77/33uee555z7rnneYmZDB2MKcJKlyYbqOsZVIgGEOgSHQoy4AKbFFjqAo5dWn/rNAh9OpO852oeJHYxtrmEu4WALhMbxG2ZE9uFAlImDRLY/t/y0b3Ig+u+iWOKsAlgIZSb0OIf15kWtKo1NXh1d5xxiSPEN2wUAHrGOg11jirjWVtJyFnb6YgrzoYwocClu0DI5guPDb43Y2LLp/Iaqf9JCGSErGvIifxd7aqQn/TOJCvFvZ8Hf9haEH+m/6sFQgHBv1Sts/15WmJLkeyl6FuFwFPzny1/ZdE7Nfg/xhv1uUmH2w6kggQp+yqze7d5JbZ8Im+KpucSwI6EN7/cYtlxZarBCts3ptfrtq9odjaGKihE+sV0vRC3u8RqWmmbij149W+Wd5p2rnET6bsqsntyb6+pO3KqkE8FvLxo74lNUX9s9uTJb8/9fG2L81KoogJFYfCm3b9usNq0MXxzw1RsUkDqQICPqf/b/q8sQi3j4WdmtV47OFgNAO6r+DEUFAtFAc9YtpXmRP6hxVsI24cvhyoqnFtrK6jM7isgBa3Dl0O94TeGb255MvzXpUIFjVrhxo/dzgoARBuwFQJkBK9reCnurxfvXX8CRW3yW1G749vT2Br7ysW0oNX1pKDTPG+rm1gHRbibAHLm/7522sKnQCZqFgCUaBCqaS/bEw9vqtWoQROf3dBBiT6KTACImZ3YueqhDdOWjDbFQ4IzIl4elNUX5begU1HD6lPRmULKeghhDcpqnUmZuD3+nkgTH6gZEE9ctlZSoGmG9UIynSCsQVndMyX+IZGiBoHMjHh2SreCglClaSBiSEG8cYnD24bv7CWms/3FocO3hnw13plTggAFb196NdlPM44tC0zrSg5ItXmyEz070UEKCMRqQgkkBQ9NvL2eSJ+revoJTORSpoT6do4/7/7UShBFHQexM+HdfyUHWO8iN/uaRzX3/QjUSLlnqM72F4cCRIY5u9Zf+Y+BAv4AvzpkQ7WAIBRujA/7Vg6cia9xlId6InafVEAAGnQMUCSkb6zTMPdBy8hU3JjrphIq+CrD+Mvxeyumrr+4IH9y7o2GF5eDghuuGx4L2zbWZ9Dc0RoQRbkkFNRdP2/0BH7EtLJLKCjr+zqh2l5u8haZ847vTBW24kRFQXKAtcsT5oqz3igQENIoECkjBJUDZSGewBlBj/ammjLrdX1c/t70ero34gMte9IByLLAjPrUwKweT5jawQshdIuGMiF5XEBU2koivBl9NeEfJeYHwuxtI81zPrn2z6ip60c6DkV1jLTOCTaE2HNjd5Z4s9MwWBOhqEHp/I9cWDtUrJNoHm4KO9P7hdnTBoMYXI8Gb6gVCg63FS53jg9O5tA57tSOdHywnCAygrJrfcTgUe5U2cvNHSPtYYoKCWlrTgsIneB2AfFR+4F4b6f9ZdTzF6P8Ytud407/dy/nL7k9X9i8J9l5y+Ef6RfbnjPvWa8N5suez+KFCgqyPY95Lnd3stv2AcBZ2+mFbze+lui1xc3dXCUUlPafXNx4/aKxcajWWNp/MklRw8/mPFntbd+h1oLE847KhQQxejVg36QQqD0MPTzHv42Ux+uGasJNBnPfwllJd71kkX7RQ3WDNf7dox3BLcNNs6vt34bbbvYHJhlTGp6O+JVHb0/2HJtX1PH+aqECqG/5YN1nlXcokGvvO6vCc4x+QskotxVHB/qa+xbOWuzw8NB3nuo+Ht0z2hHsuGU3GrWAoZfi3jrxgHpw3BPpobaCH7vbqOw6mHI836vYW3Eqcq9AtioqbJy7ufQ3lhfu8sR+s9+3vL8klACsQSu7AnxMY1MxH7YXJp7oPpLulrrj+9575Ni2aeVt1teWfEWfHQLCaspseHzOU7VWU+aM5G2NoyL4i+6j8XWDNQsmGsKu/cv+nTtjQb/mm7hfENyvqEAK5v8opjPJaL26KGBpd5TfguuBvuZRgBgY6zO0jlyZXXe9JqR+8MK8ntHOMHfHIkhu2b/0yIH7/oXJ0yFlxYnPUdRbvuILgO7+y+91l6Ka6M+cnCf4fMSypXvymHf/vzBTD3CuNGUFKT8lmK5Rs5ASqKiBlAGBXFaiSuni0fkp1pJ7Ed4e/xsAqLk46EWsG1EAAAAASUVORK5CYII=");bottom:10px;left:55px}div.vis-network div.vis-navigation 
div.vis-button.vis-left{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6iAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89
kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAABt5JREFUeNqsl2lUlOcVx//3Pi9DZRsGBgYiS2RYBQKIjAhEJW4pNrXNMbZpWtTGNkttYmJMG5soSZckRk+0p+dYPYY0Gk0ihlhRj63GhVUgBhDD5oIOy8AAMwzD4lCYtx+GqCQKuNyP7/Pc+3u2+7/3JUzEZFBYLh62S7yIZDmVBEIBqOwsQ4DNdtBFASq2A4cuZAwVgCCPF5LGHM0Chz+E1XamzUyAzCMO7IhMI+5MDCK+HpCANd+U2rYgC/Y7BoflYgVA2RAOoNYtyjDTe45+hk96e5QywaJR+NsAwDhocK61VCjLTYWaclNB0OW+en8mhl22g8C/rn7U+uGEwdov+C0i+Q0mIFWzoD7zwVU1czQ/6pjIreR3HPX5VL9jalHXiQgmBoH+XLHAtH5csDaXtxDLLzIBv5jyfOmG2H9U4S7snbpX43KaPpgBIhDx1rPzOlbfPC5GQT/nd1mS1zABa6PfPf5y5F/rcJeWpp7fPkly6f7KXBRCoOSATFfXll19x74HDsvFCghsJAG8HrvlvytCXm7EPVqc5wyzp5NX15muE1omKXXyMnd9yy5r5Q3wPghvJzrLAlimXV38+7D1DbhPFq1M6O4b6rPVWKsCBfHi5EWWv9TkQBYAEPpLvERMC9N8FtRvjt9dPl6wwo5jPvuas7WV5jNqEjz8wA+CBsaan+w9x1hrrXJtuaZX97ooLfqPLCUEGRR+iOwAsF2X98Uc30W3fb02u41frVqeVmo6FUkkwCAwCWxJ2Ls/0TPFNBb8TNdp9WvnVz4OAKdmX2QOzcMsAAjziDGMBd3asCF6SXHyknJTfqQTK+zpvhnVKT5zawCgzFTgN94pJXvP7gxxjTAIkpB+MnSWRMQZYEDnPVt/K4ejbZ/77726Lb6h95tAAiPELaJ1bcTbRfGeM8xv1azWSeyEa0P9igk+Nr1+oNFfkpwzJCJKIQA679ntN08yDXYo3qh+LuUrc0E4EcNL4dP7VNDzpU8FP3vpekoQQ5CEw4bPdEfa9+sAgEZUmkmAAAS5hLQ9p11XGO+pM8V5JLUfMeQARDMlEMKIGFOVCZYb0C7Fz0oeXmIZ6nZzYoV9od/jVS+GbahUOnn9b7T6sEOviUGyA8bMDlUa0W79wBW/bZf+lrY98cDBUI8YCxGDgHCJiVVEDN8R7QWAE8Z/+1mGut2i3eP1r0S+XRztkdBzq6NbF7WpbF3UprKxjvfHxbrfttla/QBArVDbJJIAQCURMRg8ugrKIAKBSNxzHtN3VdmxY0iQYSZmTeegwTlgknYAAB7RZBh2Nm7urbeeC1r19ROT52kWn3shfH2Fu1AO3RxjY/0fdac7/hPPJMDE11GC+HpBJmIEuAS3Oa6w01lybMbMgvgCE6O255zy24DeCr/Bvckn9+u8ZjXYIYvjxoMJy8oeXZrT9GHIqMWTwA2oI6cFMeDIcAiSEOyibXsmZG0hAFzuq1OyY6xBAnMJgdPOmks08zU/bbsB9x18P37PqS/b8+o/a96ZcLm3PmBH46Z5x40HW1eFvl4Uq0w0MwiCBOb7/qTsd6GvVY537DXWas1Iw1AiNJnOgwJi+bXhAbE08OnvaXSIW0TvYw88eaF/uM/WNdju3m5r9TlhPBzVNNDoPGC/5tRma/GJ80xqjPPUjVuvP2narrMOWd1Jlv/E1fN782UiNPZf9C/qOKa+ndOz2j+cz046sn+6KrVOsODirpOxld0lUxmEBK/ktvGgFd2l6taBZn9BAtEz5xYIvAn4/8rFKkgstAyZ6Yf+S67ezlkiSU73XXRV6xqh93TyssR4JF75efBvymLdE03jgT/Wb5tutLWpGbTm7wHZxQQAT+yDuKLyHRIk4cnAZ4pfCF9/HvfR9uh3xBxtz00BANsVDylnac6wAICaHMiBmW5NRLy4trcq0MtZ3RnpHme5H9AvjYeCc1t3pzMJgOSVnyw4eHZUB9Kyu68iMFPpysSppab8UJVC3Rnp/pDlXqF7mnYsdKQbv7cr6fDGW/Zczbt6jgUtV6kIlFxuyg/tH+6zJXmlGe8G+mlzdsyB1j3pTAwZ9q3/Sspbc9tmDwD0H3UffXCFlyuTlFpnPRdYb612c5c8+idPCu6fCLDKUubzsf6fSaWm0wmO9hbvZU8fDR2zoZ97OuppAu0UJEDEmOISZohT6q7Gek5rD3GN6FEp1DaAYB7sdNYPXPao7anS1Fmrg402g7+jYhGIaOXOaQc+uONfmCwZXJIf8xKx2KRgxYgOS+CROuyoyQKCxIhkOr4T6JWgxGnvZ1HWnf/CfHcBXxcnpRHxYwRKkUjSErFKkAQiNjP4kmBRTHbKm5KkKxwL+K39fwDX1XGF8ct++QAAAABJRU5ErkJggg==");bottom:10px;left:15px}div.vis-network div.vis-navigation 
div.vis-button.vis-right{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6iAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ8
9kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAABs1JREFUeNqsl3tQlOcVxp9z3m+XygK7C4sLxkW5o4CAkYssFSkRjabjJEOSJm1IbZx2krapiZdeprW0NVVJ0pqMM0kYJQlqkoZImGioE1ItiCAgIsFwE4Es99vCslwChf36xy5EW1A0Pn9+73fO772e93kJC5EMCszFd20SbyFZNpJAAACtjWUI8KAN1CRAJTbg9LXNU+dBkG+Xkm7Zmg4OWoUdNqZXmQCZHQFsz0yOcCYGEc8mJGDnl2UTh5AO2x2DA3OxDaAsCDvQ32VF11qP9aZYz6SeFeooi17pPQEAvZNdTnWWKnWFuVhfYT7v0zza4M3EsMk2EPgnNZusby8Y7P8x/5lI/gMTYNSnNKQt/0Xtev1DfQtZlaK+M54fmDJXXhg4G8zEINBfqlLMe28L9s/lQ8Tyr5iAJ32fK/tj+OFq3IUO1O+JyGk7GgsiEPFrlQ/07bixXdwEPckHWZJ3MgG7Qw9+/mLIS/W4SyXoNvQskpyHLg1e8CNQ3NI0laoje7Tg/8CBudgGgQwSwO/DD322ze/FFnxLRWhiBzUK94GLA2f9mSTjfU+7mjqyrVe+AX8I4aGgShbA0/47Sn4ZuLcR90ih6qih0anRiVprtUEQb43bYtlXmwNZAEDAj/ACMW1M8ExpeDXyWMVCEl4yF7vntR/zLeov8JJlWfZR+Y3N92+cx/reOmu1quNrk27EWW0xvWspJcigoNNkA4C3Yk59vH7xltvu3ktDxe7PX34ilQCQfeci1j2xfn94ZrGCneY8uxcHCnW/vbr9EQD4d2ITc8AprAOAQLewroVAAaB8oMiLiRHvmVy7znNTjWCFrXKoJOSHFQ+kvnF9f+jco07s91MFdwmSkHQuYB0T8WYwIcYj0bTQdRufGlFKJMFVaCb/GvZW6aGI4yeXOwd2mr/u05zsyDY+W5X64Nm+fO85NpuJiCFJTpslIoonADEeiT2zIzIXuh+o25PQNtbsNVMOBUn2g08MiSTHN3uZjNTEDr4dnX/6H+1H/XPasmKvW+sMGfW/MXzende4K3h/ibvSYxIAItyie/K7cgCitQxCIBFjpTrKMgM+WPfrhLbxFi9iMQtlYjAJSCSBSYBAIPBNI3p86TPXj8bk56R4PVylFE626uFLQc9efiTVPDmgBIAAtzALEYNBQRITa4kYix21FwBax655CVagPLk7806Pj1qo/7MraF/FQ14/aMhszYhvGqn3KTef89rklWrSKXUTkn3mtJK9Bzf3XJA0e/PcrdgxIwSCDPmbZMQgABJkDBKzvn+yy2npIv9xAPB1Ceo2jTZ7Gc8afipIgEhAkACDwcSQQZBIIGnx5it7gg+U3wgcnbZKR1r+FnW+v2DVtDwtXCXNSKz797oAwDzZ7ySRAIBBFsTXmBh1w1+oZ4J3h+wv9lUFdbMDOrO+5IAqWIGZthuV13nC77nKRx8r7PssyibLIkoT1/h65HsfzWyu5tF6NYNB4EYJzKUETqgcLNVv0D/cDQBrNAnm9+LOfTLfNB5u2hf5z+6TMexYji+tVdrM5leMbWOtSwQx/F1C2rcuebIqwSO568a4WmuN3mEYSiUi+pRl2l1pLvYBsKArUKVwnZRYgdHpMWVG4+/WXhwoDBXE7OmkHzJ6JNemLfv51bniGqzVPoIkyLbpfK7ZMFIkE6FlrMn7Ql+BbiHg+zXGbgLjylDpyosD58KZmKM0cfWHI9//aD5o1VCZrnO83VuQQOja5PMCfwK8n3K2ChIbLVOD9KB36le3A+u/s2Q81C2yRavQmQNdVnamLnmq4nHD9jpB0rwm77jpjTW9E906Bu18fWlWCQHAox9CtGoXTwmS8IThZyXPB+29inuoE6bMsDM9ufEAMNHqJuU8ljMtAKA2B7IhzaWNiLfWjVQb3J10/SGuEZZ7Af1X7+lluZ3HkpgEQPL291M+qbzJgXQcG60ypKlVTGwsMxcFaJW6/hDXVZZvCz3RlrmRiQHwy9nRn2bM6bnas4cLfH6s1RIorsJcFDA2PToR7Z7QezfQD9qzwvI6TyTZC47ttXeiT+2c1+wBgOndoTPLt7mrmCRjvfULQ4O1xsVVchu7b9GysYUAqy3lnsdNb0aXmQuj7PYWL2etuRl6S0OfXLjiGQIdEY6K5esc2BWhjvkqXLO6x08VPKxV6iYAwuBkv5NpvNmtbrhaX2+tWdY70eVNINhtLW0/sjrv6B0/YdJlcGlR2AvE4hUlKwHQ7BU5cz8LRx0HaPY7gXb53L/67+mUfudPmP/twOWS6AQi/j6B4iWS/IlYK+yGYJDB1wWLErLRKd/omOJbAWf03wEAyO9m+/TtS3AAAAAASUVORK5CYII=");bottom:10px;left:95px}div.vis-network div.vis-navigation 
div.vis-button.vis-zoomIn{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6iAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ
89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAABiBJREFUeNqkV2tQlOcVfp7zvgvDRe66y8htXUBR1GoFI+BtFJvRtjPJBGeaH2a8DGmbttgSTWbSJEw6TWOsrbbpTIeJZGqaTipTa6LJZDTVUTYQdNAohoso6qLucnERN0Axcb/8+HaJUHDX9Pz6vnnPe57vXJ5zzkeEIwaYcwBL/VrW0TCKqZANINEvBhSk3w9eUmC9HzjcsfarOhBGKJN84GkVJHcetvqFu4SAIYELYlpm4LpQQMqoQQKVnzeO7EYV/A8NnHMAGwHWQJmAjtg895LkFa7FU1d258UvGLBGpI4AQM9dd2TrwNn4016n9bS3LqNzsD1VKPAbfhCyqflR31thAzv+La+QxotCoNi6pn1D1s9aVli/3xtOVk72fjT1XVf17E9uHZspFBD8zdk13pdCAjsOyG6KUSEEnrT/tPHluW+cw7eQ19q2z6/t2rsYJEjZ07S6d+ukwI5/yQ7RxnYC2DZnx8dbHNs6xxs85T2R9GprZcmVwYs2BYWsmBzP83m7nIVJS73jdfdd+7PjjUu/XWUCGTtPre7ZHjxTY3Kq8DoV8Ou5u49snPGrKxN58syZ9aVXBztsigoUBd+Xt2NbfZ8llaVvah+vOz9hcX+CJenWp7eOOYS6ePpTU1w39vk+AwCzFPdDQbFGFPCUY2v9hqxfXJ0shNeHLtsUFc6UequbVvdVkwLX0GXbZPpl6Zuu/ij9x/VCBU1dU7bfdFYAIDsSFRCgeOqa9hfy/nDhwfwTKOrRd0U95n0iqch9+cKS5JVtpMCdkllhAhugCHcRwAb7z1tCEp8CCXAWAJRoCFXIYnti+sYWTQ0tll0wQMk+hGUAkBOX714xbV1IyuhxHhIMC/iR5OV9M2JmuhU1Vh7PXiakrIUQhcnLXeHQxPT4GyAtFqgwgAPF5iIFWkeu1SSLCKAweXn3/ZR5rXV7SddQpy3YDoNems9qTI5hGCitm1MOAAx0aaFCerTd84zjBed3Egq9ADA/rqD7Q3ctQC4REDmkYHb8goGgsR2tz5V0DV+xUdQoqAQ81RybU4IgFWgACgpaLLCIBUo0bv63y/aXy6+WBHWz4/IHSIGAuVooiaRgWqD3AsDVoQ6bEgtOrfJUhwrf0WUtk+r8sL6wvHvk5ijVUiJSRrQZuURtfoGMuaCoRyfP/yMy0XykgAA0DPRTxNp31x2ZFuUYBgB7bK7HNdhpKz6WXq6oQCooKghMKhkgji77vBoA1jkXlAvVfRQjFMUcmxSkRWd6gpjeu32R2kxTvyhKh1DQeud8fFBh26zfOe0xuR4JgAbzywCoRSzfeDUKatJKUQK+CjKiHZ6nZ2xzBnU7B9vixTy7qCHSQEhJU3+DtdT6mAcAFiWUeP/xyPH3Jwrfo3XzysemRcEA8F5RY8h6aPE1WwMLQ4OQ/EBANHmdGWHlzZyxk3ayB0m771yGooYy+KE0l35x0iBxZehS6ie9R1PCMaDvCzWDXA4hZ283ptwcvp6qqDBnyao6AWEQrBQQ/7y+d3YoA+NBTAaElo973p8tVFCQyipW+c3pdNu7BwBOe+tm/eniK/kPFWowpMfvuKrzzw80zSKIkWsJe0bHYu163BNwMwDsv7G36ODNtzMnM5IWZfeQgscbisvLPl1aDhLTo7I8k+n/p+dw5pGeg0WKGiS31K6vvTdmA7nx9uDZ9A3xMUIpbvSezE6MSOmbNWXewHhD6dH23o7BlqQvvrwTK6KQFpXl2WyvcE6LTB2eCPSdrurvmcUnO/cVfPD6pMteyfGs3QKpUFQoS9tU/xPH8xe+Tdd693pN/pHug0Xmqntvz1uLDo9Z9v5nnrn+dvujrI1JMUJd3OY7n97ua46douOGpkdlDoUDeG7g1NS/u/5a0Og9scCsB+ysWXSoMuyFftWJvM0E31SBjmWPznHPjy+8NjdhYfeMmJl3EiNSRgCi/25fpGu4M671zjlrm685s2fEnUoQ5lrLLW8uPLj3oX9hqgxIw8n8X1LU7yMkItCHzREZrGQV6ONmy5TggHk247sL/1jFqof/hRn/AWfqC0pI+QHBIk3tICXRrFTpF8hlJaqefh6yFxQ6HwQYlK8HAKyt3WsWxl7fAAAAAElFTkSuQmCC");bottom:10px;right:15px}div.vis-network div.vis-navigation 
div.vis-button.vis-zoomOut{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6iAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fyn
Q89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAABV5JREFUeNq0l2tQVVUYht/3W/vACMr16IFRQDiAgChpgiikMqY1WjnN9KsfGOXYTOVgkvbDUsZuXrK0qZmGUSvNspjI8TZOmo6AGBoZYly8YB6Qw80DBwQ6jJ3dj30OZZmiwvtv77XW96y91l7v9y1iMNLBuCI84tZkIXU9gwqxAILdokNBOtzgJQWWuYEDFxfcLAGh3y0k79iaD4mfjOVu4WYhoItngBiR6RkuFJAyEJBA3m/lri3Ih/uewXFFyAG4A8oAWkcm2meEzrFNH53Vkhg4xWnxCXcBQGu/3bfGeTbwjKPUcsZRElnfUxcuFLh1Nwh5vurx7s8GDbZ+L+tI/U0hkGGZX5c9/pXqOZYn2gazK8Vth0fvsRUknbx+bIJQQPCts/Mda+4KthbJFoqeKwSejX6pfO2kjytxH1pfuyqlsGH7dJAgZWvFo23L/9muboF+JxtE0/OEwMqJG46uSHinFvepTPO8lhGaX+fPHSdjCKaPy/b3v7az58h/wHFFyIHCRirgjUlbfsiJWXEFD6iUoOkdQaaQ6z9dP2YVahljF4+yXdvZ/evf4G+hQk2sEAUsti4vWxa35gKGSBMDp3T23OxxVXdXRijKovSFzrerC6ELAMT6IhcCZIyeX7c68YPzGGLlxq89PyM0q5YU2M1RuQAg0EERbiaA7Ohl1RgmPTM2p1qjBk1Mm6GDErsfswAgLiDZPmfMwrbhAqeHzm6P8Z9gV9SQdTx2lpCyAEKkhc62YZiVEjTdRgo0zXeBRnImAaSFzm7xdjjtOBGyvmZVZkNvfZjXDhU14+BToFEDKRAQpAJ0HRTjP6XHpYUKEX7RzS9bV5c+FJTmAICUgNSWQ/ZCgJwhIOJIQVLgFKcXvKHm9cyGvithFDUAFQqECho1CBUIggYapAJ1QEFBExNMYoISDU1/NIR9cvndTG/c2IBkp2fC8ZpQgknBGI/3AsDvvRfDlJhwem5zwYMs7VNlaUtbXE1h3mezj9mlGSsXrBkzkFsGKGoDmedBJLfLjxQQgAYdHRSxtPfbfceNsPYBQPTI+GZbT31YxrGIpYoKpIKigkAgFOggNBrbQBBCBaEM2L+iGGmTgnF+Uc1epqO/3VejAoAOUZSLQkFN17lAb4eVCe+VRvvHN4sH6t1feqAmMUGoPHvvhdLzTjzfKoj0sza/GLOy1Bu3vqc20Pgl5YIGkVOEZFZ0nLLMszzdDADTgjIdX6Uf3zfUx6m6u8riKRhOCcmDAqLCURo53Oe4rrsyUlGD0nlIqubdKNZJXOm9FH6y7Yh5uKBnO8vNTX2N4YoKE2fMLREQOsE8AfFN4/ak4QIfbd2XJFRQkLx85ruN7NTp2AoAZxwlCR9dWJc81NDdtoLkc86KBIJwXQ3aOpCPqwuhR2SPbCBlUc2NyogQX3N7wqgU51BAf2w9EFXUtCtLqADqS76ev6/ilgrk2q6esxHZgf5CySh3FMcG+5jbE0ZNdj4odHdDwWPGcZNNO1MPbrxtzdW4s+tI5HPBwQTTzziKY3v/7HGlhmS23g90T+OO5L1Nu7MMw3Fv/Tx1f97/FnsAYPui8/D4nBB/oZZR230uoq67auQoLaB37Iio3sEAK52nR39p+zS13HFiilHeYtOOabdC71jQzz2R+ALBbcrjWNF+cfaUwLSrk4KmtsT4T+gK9jG7AKKjv93X1lcfUNNVaantropqddnDCcIoa7lk29S92+/5CpOvQ04VJ79KUe/7iI/Hh40U6c3PyuPjhmWKN8G8Fvnw1A/zmX/vV5h/T+CXstRMUp4kOFOjZiUlWBkFQYdALitRZXRzf3RqWumdgF79NQDBOa2V/iYSHAAAAABJRU5ErkJggg==");bottom:10px;right:55px}div.vis-network div.vis-navigation 
div.vis-button.vis-zoomExtends{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6iAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L9
6fynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAABptJREFUeNqsl21QlNcVx///cx9hIipuAJHasgHlRdw0xay7yK7smg6sb2DSdtqZduLUNENmOk1tQuM4U7UzTvshSRlFZzoNCWSSSTJp+6VNkLCAeQHBoCCgqNBE0wUqL+KuwIiiZZ9+eHa3aAS3Sf8zO8/L3nt+95x7z7n3YWlpKUQEJAEgch9+Jola9xEC2ADBVgAOKqwCYAqKDgUJBIHPBWwFWQNdbyZFBwAC0GGIAHQSj3/8HHRdhzYbdDfwg4IjAsGvICgXAroYBiCEDkBBACBZoyST4gDwQqh7mQ4cEkhQD0EBIIggRMQAh2EiEvEYAGrdR3YSqIYCIEDaotVDeYnu/ryEjSOr43PHl8WmTBPA6PRQ7IWJrvhT/ubkU/7m1EvX+1KEUh7Ug+WkPEXgdUSkR+xrd0NJ4qjr8AEI9pGAI7mo78mHfnF+Y/K2K7iHUheuvJG6cOUNz/LvDwPobrpSl/Ruf2VOy9UPs4RSTSANwH4Y449EVdnt9ojHIeghCHYLgR+n/7zt4Np32tIWZU4hSpnjVk1t/caPfOO3/f++MNH5TVJcisoEoo4ksgbsXwYfdR1+kQplQuCFNS82Pp/9+158RTkTC0ce0OKutQeOp5PME0qcUBqyBmwGOC8vz4AWVOyE4CUqYO/Dh+p3pj//Bb6mHllqCyxd8ODVT69+uFKoOYTSnzFg7SJpzHFNQYWiQrUIsCN9V+uOh375zz179pSGI1FSUuK12+2+aGDt7e3muro6T/h57969lZdvDrT+ZbA6n0B1nfPVN7e0PjMjIgIIdkEAR1JR329yDvaE0+l/hQKA1Wr1bd682SsikUW7K+O3PesTNvaSAiXaLhGBvO86RFEoJ4Adac+eDxsgiZKSEm9NTY3n5MmT5mjBHR0d5vr6es+mTZu8SqnI+x+s+Ol5jRo0auX1jtepQaEAADKWWIbcy7ZGUmb79u1eu93uI+mtra31HLj5TGDs9rBJICCNn1GRCKGCUJAUuzzw6CfbTB6Px7t27VofAG/YXl6Ceyw9LmvIN3UxZUafKRACWyCELcHVP3vk4fDabDZf+2N/D9g+fsLEEFSooFGDogZNFkBRgSCsTcWm066jgRAU4et/F5u9nxRosmCLRmE+QdgSXCNzhW/s9rDJ63wVJx77V+V8YS6UNaW8BdOcqzx+3Ujt0F8Bcr1GMIMU5CzJHZ+rg6IGCYV2PimoyIK6lzIWrxkPTVGmRoqJFCyLTZmeq4MB5f3BVADnbpcQkzStUQMAk0YKBPfzxlhA95NQQe43QBotBECAFFyZHo6dz6CKCizAPFPivzUWqxm2AqIgnwkFvZNn4uczGK3Hah7wpet98UZ85R8aKScIcXYEWpMLkx8fvleHpNjlAWtTsakQa0pVKGcJQqMGUqCHBvfdjp/gTP6xwFzg85PdyaH2J4SUowKiw3889e4KBACnT582W5uKTV2uusAdUFlgzBcFQoFGDT35HwW+82mhqaenxwwA4WtYfRNnUkMZUqsJpEkn8cXU5yktYw2JjsTCMQDwer0ekt6GhgZPUVGRd3fu7qjqdU9Mj7mlpcVD0tvS0uKxWCyVANB5rS3x8s3BFEUFgTTLtuZndQHLBMSfB6pyZtfqMDQ3NzfqTcJisficTqc3BI+8bxh9L8corarM3fnDoIT+rACAU/7m7MOfHbCEwQDQ2Njo6erqinqTOHfuXNjjiI23+ystZ8c7smmkWgVJcN++fRARfLDhlacEUqVEQ1nm77xPrHjSh/+Djo3WmN/s/6OHEOgIPr2h63tVuq5Dud1ukETWoK3zorkzTiiONn/TKlNM4lj24m+Pf13o2wOVHqGA5MsAXjKPrDaqnMvlQnjTzhy0Nlw0d5oI5p3yN62amrk+ve5B5+hXgb47WGX52+V3NgoFOvQKAGUkkTqcbZy5XC7XHYf4zEFr3aXU7jih5uidPPOtvsmzixZr8VMrHjBHddLsHj+Z9Fb/n9a1+T/JDaXey0IpEzEKkHnU8Jj79++PeEwSSimQRGP+Gz8j5DVFBVKQtjBj6JGlNt/D8Y+OpMdlTphiEqcB4tqtsVjfjUtLLkx0J/dOnjWPTg+lEARIEHwaQJVQIYggACC/qxi6rn8ZHL4XETSsf0MU1HOk/CFGYgAwskUqY5eBitRxzn7/a0V1EEBwdqkN6jPI7y4xPmHmC5unbWdQRMqP2d86qANOksU6gvmArNQRNClqABnQgYuK0krI+wCOAyH3DK/vqOXhaf3PAO7mIRjDNV25AAAAAElFTkSuQmCC");bottom:50px;right:15px}div.vis-network div.vis-manipulation{background:#fff;background:-moz-linear-gradient(top,#fff 0,#fcfcfc 48%,#fafafa 50%,#fcfcfc 100%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#fff),color-stop(48%,#fcfcfc),color-stop(50%,#fafafa),color-stop(100%,#fcfcfc));background:-webkit-linear-gradient(top,#fff,#fcfcfc 48%,#fafafa 50%,#fcfcfc);background:-o-linear-gradient(top,#fff 0,#fcfcfc 48%,#fafafa 50%,#fcfcfc 100%);background:-ms-linear-gradient(top,#fff 0,#fcfcfc 48%,#fafafa 50%,#fcfcfc 100%);background:linear-gradient(180deg,#fff 0,#fcfcfc 48%,#fafafa 50%,#fcfcfc);border:0 solid #d6d9d8;border-bottom:1px;box-sizing:content-box;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#ffffff",endColorstr="#fcfcfc",GradientType=0);height:28px;left:0;padding-top:4px;position:absolute;top:0;width:100%}div.vis-network button.vis-edit-mode,div.vis-network div.vis-edit-mode{height:30px;left:0;position:absolute;top:5px}div.vis-network 
button.vis-close{-webkit-touch-callout:none;background-color:transparent;background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAcAAAAHCAYAAADEUlfTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89
HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAADvGaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjUtYzAyMSA3OS4xNTQ5MTEsIDIwMTMvMTAvMjktMTE6NDc6MTYgICAgICAgICI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgICAgICAgICAgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iCiAgICAgICAgICAgIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiCiAgICAgICAgICAgIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIKICAgICAgICAgICAgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgICAgICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iPgogICAgICAgICA8eG1wOkNyZWF0b3JUb29sPkFkb2JlIFBob3Rvc2hvcCBDQyAoV2luZG93cyk8L3htcDpDcmVhdG9yVG9vbD4KICAgICAgICAgPHhtcDpDcmVhdGVEYXRlPjIwMTQtMDItMTRUMTE6NTU6MzUrMDE6MDA8L3htcDpDcmVhdGVEYXRlPgogICAgICAgICA8eG1wOk1ldGFkYXRhRGF0ZT4yMDE0LTAyLTE0VDEyOjA1OjE3KzAxOjAwPC94bXA6TWV0YWRhdGFEYXRlPgogICAgICAgICA8eG1wOk1vZGlmeURhdGU+MjAxNC0wMi0xNFQxMjowNToxNyswMTowMDwveG1wOk1vZGlmeURhdGU+CiAgICAgICAgIDx4bXBNTTpJbnN0YW5jZUlEPnhtcC5paWQ6NjU0YmM5YmQtMWI2Yi1jYjRhLTllOWQtNWY2MzgxNDVjZjk0PC94bXBNTTpJbnN0YW5jZUlEPgogICAgICAgICA8eG1wTU06RG9jdW1lbnRJRD54bXAuZGlkOjk4MmM2MGIwLWUzZjMtMDk0MC04MjU0LTFiZTliNWE0ZTE4MzwveG1wTU06RG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjk4MmM2MGIwLWUzZjMtMDk0MC04MjU0LTFiZTliNWE0ZTE4MzwveG1wTU06T3JpZ2luYWxEb2N1bWVudElEPgogICAgICAgICA8eG1wTU06SGlzdG9yeT4KICAgICAgICAgICAgPHJkZjpTZXE+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPmNyZWF0ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0Omluc3RhbmNlSUQ+eG1wLmlpZDo5ODJjNjBiMC1lM2YzLTA5NDAtODI1NC0xYmU5YjVhNGUxODM8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDItMTRUMTE6NTU6MzUrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDQyAoV2luZG93cyk8L3N0RXZ0OnNvZnR3YXJlQWdlbnQ+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5zYXZlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjIxODYxNmM2LTM1MWMtNDI0OS04YWFkLWJkZDQ2ZTczNWE0NDwvc3RFdnQ6aW5zdGFuY2VJRD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OndoZW4+MjAxNC0wMi0xNFQxMTo1NTozNSswMTowMDwvc3RFdnQ6d2hlbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnNvZnR3YXJlQWdlbnQ+QWRvYmUgUGhvdG9zaG9wIENDIChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmNoYW5nZWQ+Lzwvc3RFdnQ6Y2hhbmdlZD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPnNhdmVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDppbnN0YW5jZUlEPnhtcC5paWQ6NjU0YmM5YmQtMWI2Yi1jYjRhLTllOWQtNWY2MzgxNDVjZjk0PC9zdEV2dDppbnN0YW5jZUlEPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6d2hlbj4yMDE0LTAyLTE0VDEyOjA1OjE3KzAxOjAwPC9zdEV2dDp3aGVuPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6c29mdHdhcmVBZ2VudD5BZG9iZSBQaG90b3Nob3AgQ0MgKFdpbmRvd3MpPC9zdEV2dDpzb2Z0d2FyZUFnZW50PgogICAgICAgICAgICAgICAgICA8c
3RFdnQ6Y2hhbmdlZD4vPC9zdEV2dDpjaGFuZ2VkPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgPC9yZGY6U2VxPgogICAgICAgICA8L3htcE1NOkhpc3Rvcnk+CiAgICAgICAgIDxkYzpmb3JtYXQ+aW1hZ2UvcG5nPC9kYzpmb3JtYXQ+CiAgICAgICAgIDxwaG90b3Nob3A6Q29sb3JNb2RlPjM8L3Bob3Rvc2hvcDpDb2xvck1vZGU+CiAgICAgICAgIDxwaG90b3Nob3A6SUNDUHJvZmlsZT5zUkdCIElFQzYxOTY2LTIuMTwvcGhvdG9zaG9wOklDQ1Byb2ZpbGU+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjcyMDAwMC8xMDAwMDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+NzIwMDAwLzEwMDAwPC90aWZmOllSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjc8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+NzwvZXhpZjpQaXhlbFlEaW1lbnNpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgIAo8P3hwYWNrZXQgZW5kPSJ3Ij8+cZUZMwAAACBjSFJNAAB6JQAAgIMAAPn/AACA6QAAdTAAAOpgAAA6mAAAF2+SX8VGAAAA2ElEQVR42gDLADT/AS0tLUQFBQUVFxcXtPHx8fPl5eUNCAgITCkpKesEHx8fGgYGBjH+/v4a+Pj4qgQEBFU6OjodMTExzwQUFBSvEBAQEfX19SD19fVqNDQ0CElJSd/9/f2vAwEBAfrn5+fkBwcHLRYWFgsXFxfz29vbo9LS0uwDDQ0NDfPz81orKysXIyMj+ODg4Avh4eEa/f391gMkJCRYPz8/KUhISOMCAgKh8fHxHRsbGx4UFBQQBDk5OeY7Ozv7CAgItPb29vMEBASaJSUlTQ0NDesDAEwpT0Ko8Ri2AAAAAElFTkSuQmCC");background-position:20px 3px;background-repeat:no-repeat;border:none;cursor:pointer;height:30px;position:absolute;right:0;top:0;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;width:30px}div.vis-network button.vis-close:hover{opacity:.6}div.vis-network div.vis-edit-mode 
button.vis-button,div.vis-network div.vis-manipulation button.vis-button{-webkit-touch-callout:none;background-color:transparent;background-position:0 0;background-repeat:no-repeat;border:none;-moz-border-radius:15px;border-radius:15px;box-sizing:content-box;cursor:pointer;float:left;font-family:verdana;font-size:12px;height:24px;margin-left:10px;padding:0 8px;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}div.vis-network div.vis-manipulation button.vis-button:hover{box-shadow:1px 1px 8px rgba(0,0,0,.2)}div.vis-network div.vis-manipulation button.vis-button:active{box-shadow:1px 1px 8px rgba(0,0,0,.5)}div.vis-network div.vis-manipulation button.vis-button.vis-back{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN
7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAEEOaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjUtYzAyMSA3OS4xNTQ5MTEsIDIwMTMvMTAvMjktMTE6NDc6MTYgICAgICAgICI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgICAgICAgICAgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIgogICAgICAgICAgICB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIKICAgICAgICAgICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgICAgICAgICAgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiCiAgICAgICAgICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD5BZG9iZSBQaG90b3Nob3AgQ0MgKFdpbmRvd3MpPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx4bXA6Q3JlYXRlRGF0ZT4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC94bXA6Q3JlYXRlRGF0ZT4KICAgICAgICAgPHhtcDpNZXRhZGF0YURhdGU+MjAxNC0wMi0wNFQxNTowMTowOSswMTowMDwveG1wOk1ldGFkYXRhRGF0ZT4KICAgICAgICAgPHhtcDpNb2RpZnlEYXRlPjIwMTQtMDItMDRUMTU6MDE6MDkrMDE6MDA8L3htcDpNb2RpZnlEYXRlPgogICAgICAgICA8ZGM6Zm9ybWF0PmltYWdlL3BuZzwvZGM6Zm9ybWF0PgogICAgICAgICA8eG1wTU06SW5zdGFuY2VJRD54bXAuaWlkOmI2YjQwMjVkLTAxNjQtMzU0OC1hOTdlLTQ4ZmYxMWM3NTYzMzwveG1wTU06SW5zdGFuY2VJRD4KICAgICAgICAgPHhtcE1NOkRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwveG1wTU06RG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC94bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDx4bXBNTTpIaXN0b3J5PgogICAgICAgICAgICA8cmRmOlNlcT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+Y3JlYXRlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdEV2dDppbnN0YW5jZUlEPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6d2hlbj4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC9zdEV2dDp3aGVuPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6c29mdHdhcmVBZ2VudD5BZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPnNhdmVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDppbnN0YW5jZUlEPnhtcC5paWQ6RUE2MEEyNEUxOTg0RTMxMUFEQUZFRkU2RUMzMzNFMDM8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDx
zdEV2dDp3aGVuPjIwMTQtMDEtMjNUMTk6MTg6MDcrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDUzYgKFdpbmRvd3MpPC9zdEV2dDpzb2Z0d2FyZUFnZW50PgogICAgICAgICAgICAgICAgICA8c3RFdnQ6Y2hhbmdlZD4vPC9zdEV2dDpjaGFuZ2VkPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+c2F2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0Omluc3RhbmNlSUQ+eG1wLmlpZDpmOWQ3OGY4ZC1lNzY0LTc1NDgtODZiNy1iNmQ1OGMzZDg2OTc8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDItMDRUMTU6MDE6MDkrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDQyAoV2luZG93cyk8L3N0RXZ0OnNvZnR3YXJlQWdlbnQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpjaGFuZ2VkPi88L3N0RXZ0OmNoYW5nZWQ+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5jb252ZXJ0ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+ZnJvbSBhcHBsaWNhdGlvbi92bmQuYWRvYmUucGhvdG9zaG9wIHRvIGltYWdlL3BuZzwvc3RFdnQ6cGFyYW1ldGVycz4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPmRlcml2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+Y29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9wbmc8L3N0RXZ0OnBhcmFtZXRlcnM+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5zYXZlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOmI2YjQwMjVkLTAxNjQtMzU0OC1hOTdlLTQ4ZmYxMWM3NTYzMzwvc3RFdnQ6aW5zdGFuY2VJRD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OndoZW4+MjAxNC0wMi0wNFQxNTowMTowOSswMTowMDwvc3RFdnQ6d2hlbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnNvZnR3YXJlQWdlbnQ+QWRvYmUgUGhvdG9zaG9wIENDIChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmNoYW5nZWQ+Lzwvc3RFdnQ6Y2hhbmdlZD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgIDwvcmRmOlNlcT4KICAgICAgICAgPC94bXBNTTpIaXN0b3J5PgogICAgICAgICA8eG1wTU06RGVyaXZlZEZyb20gcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICA8c3RSZWY6aW5zdGFuY2VJRD54bXAuaWlkOmY5ZDc4ZjhkLWU3NjQtNzU0OC04NmI3LWI2ZDU4YzNkODY5Nzwvc3RSZWY6aW5zdGFuY2VJRD4KICAgICAgICAgICAgPHN0UmVmOmRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwvc3RSZWY6ZG9jdW1lbnRJRD4KICAgICAgICAgICAgPHN0UmVmOm9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdFJlZjpvcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDwveG1wTU06RGVyaXZlZEZyb20+CiAgICAgICAgIDxwaG90b3Nob3A6Q29sb3JNb2RlPjM8L3Bob3Rvc2hvcDpDb2xvck1vZGU+CiAgICAgICAgIDxwaG90b3Nob3A6SUNDUHJvZmlsZT5zUkdCIElFQzYxOTY2LTIuMTwvcGhvdG9zaG9wOklDQ1Byb2ZpbGU+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjcyMDA5MC8xMDAwMDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+NzIwMDkwLzEwMDAwPC90aWZmOllSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz4jq1U/AAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAVTSURBVHjanFVfTFNnFP+d77ve8qeVFbBrpcVgRrCRFikFByLxwSAaE32oRCHD6JMxxhhn8G2RxxH3MsOTbyYsmCAxPMmMMYtkIUYmK60OO0qAK23BFlNob0uh3x7WS5jLZPpLbm6+k/P9zrm5v9855PF4UFhYCABgjIExBgAgIqRSqRIi6gDQRkQ1RGTB3wgR0e8AHgH4Sa/XR/EBiAiJRAJ04cIF5Ofng4g2n0gkUkxENwF0c843LzHGQEQQQkCLExEA9ALotVgsUQAQQmgNQhJCbF5kjCEUCl0moj4t5na7fTU1NUpVVVXUYrEkASAcDhe8efOmxOfzWScmJqoBdBNR99LS0hWz2dynNSSEAF28eBGFhYVgjCEcDn9HRD1EhIMHD3o9Hs9kWVlZAh9BKBQqGB4edr58+dKZ+6JbJpOpBwBWV1fB6+rqIMsyIpHIFcZYL2MMra2tY5cuXRrfuXNnBtvAYDBk3G63oqpqZm5uzgrgSDKZjBoMhueZTAbc5XIhFouVEtFTxhiOHTs2dv78eS8+Efv374+oqpqZnZ21cs5PJJPJPlmWkyynnBuMMTQ0NHi7uro+mVyDx+Pxulwu71ZOlkqlSonoJhGhvb39s8k1nDx50ss5hyRJN9PpdKlERB2aWjSVaEilUvzBgwcORVEs5eXloXPnzk1sV8BkMiUdDofP7/dXZ7PZDilnIhw4cGBeS1pbW2P37t1zBwKBikQiUUREWFhYsHHO0d7evm0Ru90+/+rVq2rO+XGJiJxEhMrKyhgAjI6OWoeHh5tWVla+4JzDZrO9bW5unhwcHGzz+/32np4e+xaDbfoHAMxmc6ijo2O0oqIiJkkSNjY2HBIRmRljMJvNyWfPnln7+/tPMMZQXl6+0NbW9qK2tjYcj8floaEhqKpq+HCkbD3PzMwYBgYG0NXV9UuusFna2kEgELAQEQ4dOvSis7PzN41Ar9dnrl27NqCNkv/C3bt3zy4tLVmICJxzEBFJRBQmorLFxcWCqqqq0Pj4eO3Y2JhbUZTdra2tL2pra8OJRGLHnTt3zkqS9K+huHU4EhHMZnMoGo0W5OIh7nK5jjLGKq1W69vDhw8rRqMxMjc3t2t5eXnX5ORklc/nM+fl5SWnpqa+0uv1K/n5+Ws6nW5NluXNd15e3ppOp1uz2WyzZ86cGQ0Gg6ZAIFCZzWZ/lYjokRDiuN/vt7W0tMw3NTUpbrd78P79++5gMFgRiUTKHj58WMYYQ3V19etTp05tq6Lp6Wkb5xxCiEfc7XZPM8a6FxcXTfX19a/1en2Gcy5qamreNjY2/qGq6joRZe12+9Tp06e3JY/FYgWPHz8+mhvr3/CWlpbk+vp6PmOseWVlBS6XS9GSJUkSdrs93NDQ8Oe+ffvC/8fJIyMjddFo9Esi6pVleVjT2m0A8Hq9zqGhIefnjoknT544A4GAM/eDbxMReFNTE0pKSpKqqsaI6Pj8/LxVVdWM3W6PfCr5xMTE1zllXS0uLn6aSqXAGxsbodPpoNfrn6uqCs75EUVRrJFIZMfevXsXdTrdxseIE4mEPDIyUu/3++tynd8yGo29RIR0Og26fv06ioqKwBgD5xzv3r27zBjrIyJIkgSHwzFZWVmp7NmzJ1ZaWpoAgGg0WqgoSvHMzIw1GAw6tvjhitFo7NPW5fv370Hd3d0oKCgA53zTQMvLy+VCiKuSJH0rSdLmztZytIWv5RPRD0T0Y3Fx8dzWfby6ugopHo//w4mcc8iyPMc5v5FOp7/PZrOdQ
ohWInIC2C2EgBBigYi8Qoifs9lsv06nWyIiaFxagXg8jr8GAGxuIe7LBeWhAAAAAElFTkSuQmCC")}div.vis-network div.vis-manipulation div.vis-none:hover{box-shadow:1px 1px 8px transparent;cursor:default}div.vis-network div.vis-manipulation div.vis-none:active{box-shadow:1px 1px 8px transparent}div.vis-network div.vis-manipulation div.vis-none{line-height:23px;padding:0}div.vis-network div.vis-manipulation div.notification{font-weight:700;margin:2px}div.vis-network div.vis-manipulation button.vis-button.vis-add{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1
SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAEEOaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjUtYzAyMSA3OS4xNTQ5MTEsIDIwMTMvMTAvMjktMTE6NDc6MTYgICAgICAgICI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgICAgICAgICAgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIgogICAgICAgICAgICB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIKICAgICAgICAgICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgICAgICAgICAgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiCiAgICAgICAgICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD5BZG9iZSBQaG90b3Nob3AgQ0MgKFdpbmRvd3MpPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx4bXA6Q3JlYXRlRGF0ZT4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC94bXA6Q3JlYXRlRGF0ZT4KICAgICAgICAgPHhtcDpNZXRhZGF0YURhdGU+MjAxNC0wMi0wNFQxNDo0MDoyOSswMTowMDwveG1wOk1ldGFkYXRhRGF0ZT4KICAgICAgICAgPHhtcDpNb2RpZnlEYXRlPjIwMTQtMDItMDRUMTQ6NDA6MjkrMDE6MDA8L3htcDpNb2RpZnlEYXRlPgogICAgICAgICA8ZGM6Zm9ybWF0PmltYWdlL3BuZzwvZGM6Zm9ybWF0PgogICAgICAgICA8eG1wTU06SW5zdGFuY2VJRD54bXAuaWlkOjVkNWIwNmQwLTVmMjAtOGE0NC1hMzIwLWZmMTEzMzQwNDc0YjwveG1wTU06SW5zdGFuY2VJRD4KICAgICAgICAgPHhtcE1NOkRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwveG1wTU06RG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC94bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDx4bXBNTTpIaXN0b3J5PgogICAgICAgICAgICA8cmRmOlNlcT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+Y3JlYXRlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdEV2dDppbnN0YW5jZUlEPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6d2hlbj4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC9zdEV2dDp3aGVuPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6c29mdHdhcmVBZ2VudD5BZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPnNhdmVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDppbnN0YW5jZUlEPnhtcC5paWQ6RUE2MEEyNEUxOTg0RTMxMUFEQUZFRkU2RUMzMzNFMDM8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDEtMjNUMTk6MTg6MDcrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDUzYgKFdpbmRvd3MpPC9zdEV2dDpzb2Z0d2FyZUFnZW50PgogICAgICAgICAgICAgICAgICA8c3RFdnQ6Y2hhbmdlZD4vPC9zd
EV2dDpjaGFuZ2VkPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+c2F2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0Omluc3RhbmNlSUQ+eG1wLmlpZDo2OWVmYWE1NS01ZTI5LTIzNGUtYTUzMy0xNDkxYjM1NDNmYmE8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDItMDRUMTQ6NDA6MjkrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDQyAoV2luZG93cyk8L3N0RXZ0OnNvZnR3YXJlQWdlbnQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpjaGFuZ2VkPi88L3N0RXZ0OmNoYW5nZWQ+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5jb252ZXJ0ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+ZnJvbSBhcHBsaWNhdGlvbi92bmQuYWRvYmUucGhvdG9zaG9wIHRvIGltYWdlL3BuZzwvc3RFdnQ6cGFyYW1ldGVycz4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPmRlcml2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+Y29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9wbmc8L3N0RXZ0OnBhcmFtZXRlcnM+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5zYXZlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjVkNWIwNmQwLTVmMjAtOGE0NC1hMzIwLWZmMTEzMzQwNDc0Yjwvc3RFdnQ6aW5zdGFuY2VJRD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OndoZW4+MjAxNC0wMi0wNFQxNDo0MDoyOSswMTowMDwvc3RFdnQ6d2hlbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnNvZnR3YXJlQWdlbnQ+QWRvYmUgUGhvdG9zaG9wIENDIChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmNoYW5nZWQ+Lzwvc3RFdnQ6Y2hhbmdlZD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgIDwvcmRmOlNlcT4KICAgICAgICAgPC94bXBNTTpIaXN0b3J5PgogICAgICAgICA8eG1wTU06RGVyaXZlZEZyb20gcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICA8c3RSZWY6aW5zdGFuY2VJRD54bXAuaWlkOjY5ZWZhYTU1LTVlMjktMjM0ZS1hNTMzLTE0OTFiMzU0M2ZiYTwvc3RSZWY6aW5zdGFuY2VJRD4KICAgICAgICAgICAgPHN0UmVmOmRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwvc3RSZWY6ZG9jdW1lbnRJRD4KICAgICAgICAgICAgPHN0UmVmOm9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdFJlZjpvcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDwveG1wTU06RGVyaXZlZEZyb20+CiAgICAgICAgIDxwaG90b3Nob3A6Q29sb3JNb2RlPjM8L3Bob3Rvc2hvcDpDb2xvck1vZGU+CiAgICAgICAgIDxwaG90b3Nob3A6SUNDUHJvZmlsZT5zUkdCIElFQzYxOTY2LTIuMTwvcGhvdG9zaG9wOklDQ1Byb2ZpbGU+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjcyMDA5MC8xMDAwMDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+NzIwMDkwLzEwMDAwPC90aWZmOllSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz5WKqp9AAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAYXSURBVHjafFZtUFTXGX7e9z27sveuMCwYV8ElrA7YSFYHtJUPkaaI0aRqG8wP00zUzljDINNSA/2ROtpO24SxnahlxjYd7SSjmUkymcxYlDhQPzHGisEVp8HwYWCVVVgEsrsuLnL74+5uqTF9Z+7cO/d8PO95zvO851BlZSV0XQcAMDOYGQBARDhX3JRmMDYZwLPMWAzGHACYIgwS46oBNBNwtOL8CwE8EkSEUCgE2rJlC2w2G4go8Zwo/bMDgnoG6gxLfAAAYvPDMCCszKTAMIAGAhrWnf15AAAMwwARIRKJgDZv3gy73Q4iAjPjxIr9VVOMRhbAYKB8zvrO0llrfEsdKwLZek6YAPSFvtSu3GtLawu0ZJ6625SHGBQB1T88t6MxvopgMAjaunUrdF0HM+P4yv27DMYeJmB1RqW3Jnf3tQX2p0L4P9EXuqEd7PmDp+XuMU9sRbvXnnt1TxxACgoKYLVacbzsQDUJGkSATe6qi28uPtzusM6Kxie6NHLGUX3lxVUNX9StPHnn4wy3njuUYcu6n2pNi66avcEXnByP/nv8aiaIyrqz2gO5A9+9FI1GIfn5+WhZdTAdjFMkwMvZOy7uWnTAOz3L4Yk71m3t69fdfTDoUGTBeHTUfiHQ6lo7Z2OXJvpDAChKe+aOCdKRKWxZ2+1qb3yyd3GYmRkQ7GQBVs99wfv6on3eR2k4PdTkDEbH7IuS8/svld/561PJS/pDk1/bzwx94pze7xc5v/H+YPY6r5BAkdrJzODTK46lE6PeYEJt7u+8j+OZwCBiEAgAoNgKJoEQf6PvNvdrXgtZoNhSf7q0KZ3B2AQmVMze0Jmt54S/DcDCVig2NcvEUGxJAE4Pl+YOr0iv6BRSIPAmBeBZAmHlE2sH4p1uhrq1s0MnnEQMBsf8wRASAICQQCCITN1X7/sOuc0kgOVp3/fPs2WHv+coG7gQOJUnLGsUCTxEjPzUohEA+NfIWUdtx0+efzA1kSSkIGyBAQNCKgHAEBAJ3u79U7kiAcWoem/gb5Fd33nrH3kp+SMWtuAB+GllMJxMjCx9QRgA3uiqL5kwHiTlpxb3smlfMDGYGPP1hcMAkJvs8ScpfdJspdj+MK6Pf+5+u29vyb4lR4+BGEziVESAkEpw6Av1OhUpHCz4qOXbzFWz4Ncdj/v/o08Lt92ODDgZDCEFJYoUGH4mzugP92puPTf0pD3H7wvfdFZdqSxnMtWjoGAAmG9fOLxjwesdjT2/XzIQ7ks3sycYMSEwGHNtWf5bkX5NkYCJBxUBXiGV0XHvosOt54Zey33j/K+8P33++vjnbiGJbbLE+J9SANAb6nJ2B79wcUwETAwQQ7fMjPzMvfP8ja87HUIKMOiaAqMZhrGmLdAy78eZrwwsTS0eObTs+IdtgVanxBUExqGbb5VzrIISGIoUXsmqbgEhJldCQWqRf27SvPAn/o8XmgLhZsUkR4ll37mhk3n94Z4OlzY/7NLcYZfm7o1z2zT4vsvUNSXqprBCkmiTFbPX90/fh8GIT2sf+zTPdDMf4dVnNg4z+E0ixsGeBs9jd5ViSgLHjCb/peaR+MD3d4/ZJg2llyuG2Vwy7QWAs8PNnn1f7vkGSGxAzE6mk+kxkx/p/4unffSCR0hAoL1EBCYiPNdWNcwkNQTCR7feWX6g+7f/A7I8rcw/U6UEe0Ndrhc/W7mtL9ztmqlSgstSS/zTJ28dalpOpkRryrwbhwBACgsLMWPGDOT4ll3qyeqAkJTdCF7P/CrUY/GkLL1rE+2hTbSH8+0Lb/WEuhzhyaA905blf9Vd/895WnZwLHrPevir/cvOB1oLYpTtLrm6oYGIMDExAaqtrUVKSgqYGSKCk0WHq5ikkWEWtNL0imv5qUW+RclLRjJsrhBAuH1/QL8R7HR4xy5nescuP23E6hOA6mLv+sb4uTw6Ogqqq6uDpmkQkcStorX4XRcM1FjZ+kvFFjCJKU1WpkNJJUqIMtX1RyLeX3JtQ0JRhmGYZ/L
27duRnJycuFGISOJ9pqh5lrB6iYgqGOxRrOaa54DcZmKvkJxk8JHC9rKh+KVhOsD4+Dj+MwADIf8n5m4xGwAAAABJRU5ErkJggg==")}div.vis-network div.vis-edit-mode button.vis-button.vis-edit,div.vis-network div.vis-manipulation button.vis-button.vis-edit{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0k
X/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAEEOaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjUtYzAyMSA3OS4xNTQ5MTEsIDIwMTMvMTAvMjktMTE6NDc6MTYgICAgICAgICI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgICAgICAgICAgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIgogICAgICAgICAgICB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIKICAgICAgICAgICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgICAgICAgICAgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiCiAgICAgICAgICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD5BZG9iZSBQaG90b3Nob3AgQ0MgKFdpbmRvd3MpPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx4bXA6Q3JlYXRlRGF0ZT4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC94bXA6Q3JlYXRlRGF0ZT4KICAgICAgICAgPHhtcDpNZXRhZGF0YURhdGU+MjAxNC0wMi0wNVQxNDoxMjoyNSswMTowMDwveG1wOk1ldGFkYXRhRGF0ZT4KICAgICAgICAgPHhtcDpNb2RpZnlEYXRlPjIwMTQtMDItMDVUMTQ6MTI6MjUrMDE6MDA8L3htcDpNb2RpZnlEYXRlPgogICAgICAgICA8ZGM6Zm9ybWF0PmltYWdlL3BuZzwvZGM6Zm9ybWF0PgogICAgICAgICA8eG1wTU06SW5zdGFuY2VJRD54bXAuaWlkOjY5OTM3ZGZjLTJjNzQtYTU0YS05OTIzLTQyMmZhNDNkMjljNDwveG1wTU06SW5zdGFuY2VJRD4KICAgICAgICAgPHhtcE1NOkRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwveG1wTU06RG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC94bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDx4bXBNTTpIaXN0b3J5PgogICAgICAgICAgICA8cmRmOlNlcT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+Y3JlYXRlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdEV2dDppbnN0YW5jZUlEPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6d2hlbj4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC9zdEV2dDp3aGVuPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6c29mdHdhcmVBZ2VudD5BZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPnNhdmVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDppbnN0YW5jZUlEPnhtcC5paWQ6RUE2MEEyNEUxOTg0RTMxMUFEQUZFRkU2RUMzMzNFMDM8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDEtMjNUMTk6MTg6MDcrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDUzYgKFdpbmRvd3MpPC9zdEV2dDpzb2Z0d2FyZUFnZW50PgogICAgICAgICAgICAgICAgICA8c3RFdnQ6Y2hhbmdlZD4vPC9zdEV2dDpjaGFuZ2VkPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+c2F2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0Omluc3RhbmNlSUQ+eG1wLmlpZDozOWNhNzE5ZC03YzNlLTUyN
GEtYmY1NS03NGVmMmM1MzE0YTc8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDItMDVUMTQ6MTI6MjUrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDQyAoV2luZG93cyk8L3N0RXZ0OnNvZnR3YXJlQWdlbnQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpjaGFuZ2VkPi88L3N0RXZ0OmNoYW5nZWQ+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5jb252ZXJ0ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+ZnJvbSBhcHBsaWNhdGlvbi92bmQuYWRvYmUucGhvdG9zaG9wIHRvIGltYWdlL3BuZzwvc3RFdnQ6cGFyYW1ldGVycz4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPmRlcml2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+Y29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9wbmc8L3N0RXZ0OnBhcmFtZXRlcnM+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5zYXZlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjY5OTM3ZGZjLTJjNzQtYTU0YS05OTIzLTQyMmZhNDNkMjljNDwvc3RFdnQ6aW5zdGFuY2VJRD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OndoZW4+MjAxNC0wMi0wNVQxNDoxMjoyNSswMTowMDwvc3RFdnQ6d2hlbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnNvZnR3YXJlQWdlbnQ+QWRvYmUgUGhvdG9zaG9wIENDIChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmNoYW5nZWQ+Lzwvc3RFdnQ6Y2hhbmdlZD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgIDwvcmRmOlNlcT4KICAgICAgICAgPC94bXBNTTpIaXN0b3J5PgogICAgICAgICA8eG1wTU06RGVyaXZlZEZyb20gcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICA8c3RSZWY6aW5zdGFuY2VJRD54bXAuaWlkOjM5Y2E3MTlkLTdjM2UtNTI0YS1iZjU1LTc0ZWYyYzUzMTRhNzwvc3RSZWY6aW5zdGFuY2VJRD4KICAgICAgICAgICAgPHN0UmVmOmRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwvc3RSZWY6ZG9jdW1lbnRJRD4KICAgICAgICAgICAgPHN0UmVmOm9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdFJlZjpvcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDwveG1wTU06RGVyaXZlZEZyb20+CiAgICAgICAgIDxwaG90b3Nob3A6Q29sb3JNb2RlPjM8L3Bob3Rvc2hvcDpDb2xvck1vZGU+CiAgICAgICAgIDxwaG90b3Nob3A6SUNDUHJvZmlsZT5zUkdCIElFQzYxOTY2LTIuMTwvcGhvdG9zaG9wOklDQ1Byb2ZpbGU+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjcyMDA5MC8xMDAwMDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+NzIwMDkwLzEwMDAwPC90aWZmOllSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz4ykninAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAYpSURBVHjafFZtTFvnFX7Oea+NudiY2Hwam4CBlgQwXdKREDKUoYg0jbRJ29RJ2VZ1mjRFUxSpA3VTfkzJfkQbS7spU6rtx5Z2UtppScjaHxvLuiatWi2jLEoMIUDCh23g2gbj7+tPuPvhOurawPl1dc99n+c55z33fV46ceIEZFkGADAziAgAQERoe/9ZK4GPM/AcgbsIXAcABCgMvkfAqAa89eDoJyF8LogIqqqChoaGYDAYHr8kItS8uc8iIH6iAa9IkAo5EAQX8pqmgUVBCBggYFgDhv0/GAsBgKZpICJkMhnQ4OAgZFkGEYGZUXmp+0cS+CKBwWA0DVRPOg5Zl2q6zaHyJlnVAMQXVTkwHrUqH0Xsvn+tdQAAMQDgpPLS2MViFY8rkGUZzIzaS/t/xqCzGggtz9e697zsnKhoLUtim4jOq/LE6x7X0nsh16dEZ5a/O3a2SCAOHjwInU6Hujd6ThJ4mCDQ+b2G232v7v6vwarPbQn8MGlMr+X0kpE3Wr5Zt5hL5HPhqYSdQIfKJ+yhxDPKWC6Xg+jt7UXD5b5KBt1kCHS85Ljd8/On3NupfnhFaZj4rWff1B98B1R/hnUmKd36bdtCNl4g0en4edNE/cXwLq8qMTMIPAQwmo/WuHvObA8+9c58k/dKtD0TyZWXN5YGA7ej7epKxspM//7SoNOdWc/Jyq2wiwhDzPxT8cP0jys3VMM7OmL0/77zn4Ydui3b8uiK0jD7RrA77c9Wd57cefPpF+2T6bWsFPWkaiPTCWvTsZpHFU+XrS+8G3AR08F6X+1FJvBxQQzHQOWk2SmrW4FPX/U2LVwPuDZj+fJKl2khPpeyAqA9rzR/YqwuiWXX8taN/CabGkrVuq9YJlkQQDjOAJ5jAhz9Vt9W4N5/rNp8I+vtMV/aZm4zLnUNNt0urdYnF68HWoJj4Wo1mLGUNRr8LEgDgNqeCh8xQIKOsgC7iAjVe83rT9zQa8uNM28u70kspessu8q8zq/V3NcZpVzb9+0zmVhOvvvrhaMVzrJg0zeq7xMVCCwdpnWSGBqjUyJwLTFgbvxie3w31uoWR1Y74r60rdxZqrR8q85t2W2MGCp12bm/KC3hyaSTiMhxuGrKcahqpbjOaDOoEhOEoFqJQCCJvqA85I6bfTdDjQlf2lbxVNlS6wt19yy7jRHZZlDnrinNj/6sHMhnNw2Ogco7O79e5fm/xQywRBBCEAuwn4gQ96bkYj4Vyuq9N1Z3Bj4Od5bs0MXt/dZZ21ctiqFan174q985P+Lfp+U1g7XDON/1ctP458WlVjLyJhOISZE0wM0S1QfuRC3lTjkJAKKEtNC9eIOhSh9xHLZOJRZTFuXDsEoStLkR/768ummsaJG9Pb9oe+9J+xaeSVokiQDSJphAo5uaBuWjiKP4QTqS1cUWU7ayesN66wu22frD1vmVW6GW6T8u9eVjGyZzs+w78Nqu0a2mbvVu1KEJQAgeZRL0liQYyx+GOmKeQpu0rMYsAJPNEFGD2dLodLIy6c9Ys7G8yeSUl3tf2/X3rcBVJSOv34l3sCBogi7z1LH/rBHjl4IJ93/ncQFAnjeImJD0Z8zuCwu9q3djDXqTlAKID5xv+9t2R8n8VcUFBljQ8Gyfe40BYBM4DwDLt8Kue79ZcFkbzfEdbUbv+oN4c9KTtsfm1MbYQqqh+2zrVZYKs/7Ef+byimt1POYiJhDhPBFBIiIEXhxfs7/dfYoIF+auBfYTE/pebx/V8hqBP2ODvD34yvuh/WCAmU75Bx6sIgaI/v5+6PV6JLqUsYr7dpDAoehs0h73pHTWrvKgThYbRSt9UmSjef3MpaUvBz4O72UmADgTOPJguGiZor+/HyUlJWBmJFz+D8xTtlUiOpbwpmrmrweeSXrT+g11k4SBN3RGKUcAVCVdFhyP1nreDbY//NPyEXUlU/Pp4XYycGT6V0Ux2WwWdO7cOZSWlkII8diX7SPPNgDaKdbxoNAxwATBAEkEEgSWCEQAqPAMwqvMdCEwMO0tVqZpWsGTT58+DaPR+PhGIYQAAAgh0P7B3ioW/B0iGiCGiwXbCuOHFSJys6AbYFye2T+xWhT3WYJEIoH/DQBMw3kes8OJPgAAAABJRU5ErkJggg==")}div.vis-network div.vis-edit-mode button.vis-button.vis-edit.vis-edit-mode{background-color:#fcfcfc;border:1px solid #ccc}div.vis-network 
div.vis-manipulation button.vis-button.vis-connect{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Z
j8uGDYbrnjg+OTniP3L96fynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAEEOaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjUtYzAyMSA3OS4xNTQ5MTEsIDIwMTMvMTAvMjktMTE6NDc6MTYgICAgICAgICI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgICAgICAgICAgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIgogICAgICAgICAgICB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIKICAgICAgICAgICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgICAgICAgICAgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiCiAgICAgICAgICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD5BZG9iZSBQaG90b3Nob3AgQ0MgKFdpbmRvd3MpPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx4bXA6Q3JlYXRlRGF0ZT4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC94bXA6Q3JlYXRlRGF0ZT4KICAgICAgICAgPHhtcDpNZXRhZGF0YURhdGU+MjAxNC0wMi0wNFQxNDozODo1NyswMTowMDwveG1wOk1ldGFkYXRhRGF0ZT4KICAgICAgICAgPHhtcDpNb2RpZnlEYXRlPjIwMTQtMDItMDRUMTQ6Mzg6NTcrMDE6MDA8L3htcDpNb2RpZnlEYXRlPgogICAgICAgICA8ZGM6Zm9ybWF0PmltYWdlL3BuZzwvZGM6Zm9ybWF0PgogICAgICAgICA8eG1wTU06SW5zdGFuY2VJRD54bXAuaWlkOjlmYjUwMDU0LWE3ODEtMWQ0OC05ZTllLTU2ZWQ5YzhlYjdjNjwveG1wTU06SW5zdGFuY2VJRD4KICAgICAgICAgPHhtcE1NOkRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwveG1wTU06RG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC94bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDx4bXBNTTpIaXN0b3J5PgogICAgICAgICAgICA8cmRmOlNlcT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+Y3JlYXRlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdEV2dDppbnN0YW5jZUlEPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6d2hlbj4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC9zdEV2dDp3aGVuPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6c29mdHdhcmVBZ2VudD5BZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPnNhdmVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDppbnN0YW5jZUlEPnhtcC5paWQ6RUE2MEEyNEUxOTg0RTMxMUFEQUZFRkU2RUMzMzNFMDM8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDEtMjNUMTk6MTg6MDcrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDUzYgKFdpbmRvd3MpPC9zdEV2dDpzb2Z0d2FyZUFnZW50PgogICAgICAgICAgICAgICAgICA8c3RFdnQ6Y2hhbmdlZD4vPC9zdEV2dDpjaGFuZ2VkPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+c2F2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0Omluc3RhbmNlSUQ+eG1wLmlpZDo3ZWRhMjI0MC0yYTQxLTNlNDQtYWM2My1iNzNiYTE5OWI3Y2E8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDItMDRUMTQ6Mzg6NTcrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp
zb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDQyAoV2luZG93cyk8L3N0RXZ0OnNvZnR3YXJlQWdlbnQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpjaGFuZ2VkPi88L3N0RXZ0OmNoYW5nZWQ+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5jb252ZXJ0ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+ZnJvbSBhcHBsaWNhdGlvbi92bmQuYWRvYmUucGhvdG9zaG9wIHRvIGltYWdlL3BuZzwvc3RFdnQ6cGFyYW1ldGVycz4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPmRlcml2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+Y29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9wbmc8L3N0RXZ0OnBhcmFtZXRlcnM+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5zYXZlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjlmYjUwMDU0LWE3ODEtMWQ0OC05ZTllLTU2ZWQ5YzhlYjdjNjwvc3RFdnQ6aW5zdGFuY2VJRD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OndoZW4+MjAxNC0wMi0wNFQxNDozODo1NyswMTowMDwvc3RFdnQ6d2hlbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnNvZnR3YXJlQWdlbnQ+QWRvYmUgUGhvdG9zaG9wIENDIChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmNoYW5nZWQ+Lzwvc3RFdnQ6Y2hhbmdlZD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgIDwvcmRmOlNlcT4KICAgICAgICAgPC94bXBNTTpIaXN0b3J5PgogICAgICAgICA8eG1wTU06RGVyaXZlZEZyb20gcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICA8c3RSZWY6aW5zdGFuY2VJRD54bXAuaWlkOjdlZGEyMjQwLTJhNDEtM2U0NC1hYzYzLWI3M2JhMTk5YjdjYTwvc3RSZWY6aW5zdGFuY2VJRD4KICAgICAgICAgICAgPHN0UmVmOmRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwvc3RSZWY6ZG9jdW1lbnRJRD4KICAgICAgICAgICAgPHN0UmVmOm9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdFJlZjpvcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDwveG1wTU06RGVyaXZlZEZyb20+CiAgICAgICAgIDxwaG90b3Nob3A6Q29sb3JNb2RlPjM8L3Bob3Rvc2hvcDpDb2xvck1vZGU+CiAgICAgICAgIDxwaG90b3Nob3A6SUNDUHJvZmlsZT5zUkdCIElFQzYxOTY2LTIuMTwvcGhvdG9zaG9wOklDQ1Byb2ZpbGU+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjcyMDA5MC8xMDAwMDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+NzIwMDkwLzEwMDAwPC90aWZmOllSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAog
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz4ubxs+AAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAUtSURBVHjajJZ/bNT1Gcdfz/P53PV6B4W7VltLqdAaplIOiMOoyxxJCSs/Gv/yB4gzJroAosmmDklwkYWR0bQsdmkykoojTpcsWYLxD/lRZdMQkTHRtkLZRqG0tIVe7662vTu43n32x/VKZ/jh89cn38/zvN7P5/l88zwf2blzJz6fDwARQUSm1n8s31CM0/VAnbNmsUPuAsDpgEO+Bg4C7//iyv5hvmMiQiqVQpqamvB6vVNwEeG1JZtCBrYi/MrkAwDNgjhwAlbzICBLA0rDb0+/839C6XQaaWxspLCw8Dp86cbNmqVFJQddE6KzdjZ9D89g+B6fSyCOcyn1nxil+O9xKg5HqWFSHGXLjrP7W/ICqVQK2bNnDz6fDxFh65KNvxbHDhF4rJj2bXPo+IGfcW5h5xL4f99P+FCEMIAob75x9t0dAMlkElNXV4e1lteXbNqiQoMaeOFOjrdU868SD2luYyEP6dUh+sYmSHeOU6GO5Z8VLx5+NNZxIpPJ5AS2L3upROCoCvz8Lo7vnkf77cAHhpiz/zIL9vWz8L8p/NvupmM0Q7pjnAoLqz8tDrc8MnQqYVUVhVdF4LEg7b+rvDn8wDDlH0WoPpukLJImSBaMwjcJqmwWts2jPZLG/8kwYVFeVdXXZcFf4yVDc2cNKfBFmD9X+0ncCP58F48eG+Feo2CAUkvs4dl0V/uJvdXLiiV+ut++n7YLSfxPfMMG54ChzB3WIesVWB2i82bw1AR6fJR7C4VsfYiv6u/k3A9nEgP4zXke8DiYHyAOMK+QxPIgnZ9GqSHr1itQJ8DK2fTerDQ+S/bHRXQJaHSCwNIZ2Xh+7+S3VAmwNMBA/tuPZtErgKquUmdMWIFlRURvdamRNEXGwIWrlP47pTMzLiunxghGMwTLvcTWlHAp77s4QNSrYMQtss6ZMgWqCm5cHoDHO1nbk6K8zEN8+3zatv2Hn1b59EqJZdxmYUERg9P9KwpIiAOTdWUWBXuLzB/vZG3P1Un4PNp2d1MbmyD45TWCxuCsQm0x56bHGHFYEZwxok7toAA9Sfw3hCcoL/NOwi9QO5wmWO1j4JEgZxTkodmcWRGkf3pcX0r8xoAaBixKu4U5/xwndM+0tpAvS6mP+PZK2nb1UBvPEKwKMLDvPj4ESGc55lGy303sdJKQdZB2rkMdctAB/4gzN+/Q2ENNd4LyUi/xN+bTtquX2thk5nk4wI3gAF+OMNcA1nFQDfK+BY5GqbkwWabTY5QZhXWlnNx1ntrY1Rz87fuvw29m/Sn8J+PUGAFj5T19baA1IspuBZp7cx1x4SwG1cEf+lgRSROs8jGwb+Ht4QB/GSSsAhYano39LWIBxNEIbP14hPDuiyS2VtJuHXQlKKvxM/jiXDq/D/xPlwifGMkJZB2NIoKpr69nxeiZxLHicFSFVWfGqBidIP3LSjrWltD94CyufF/4kQgPuVz2Lz93+dDRa9eu5QQ8Hg8/iXee+Dy4CKMs7xqn4nwKz9IirhQqmVuB42m8ey+x7LMoD6iAON782eChhqmRuXfvXgKBAKqKqtI0/8nNKrQI4BVYXkzHgzPpC88gWuHL/caXrhLoGiN0apSKr0ZZRBZM7q2w5ZnLR1oAnHOMjY0hra2tFBQUYIyZmstvVT1Z6eDlAuEVq7merxmwueNPDXy9PvybjKP5mctHLk4/XTKZRJqbm/H7/VNw1VyEMYbW4FN3WNWnnchKoy5sHeVGBRX6VWi3ymFx7r11Ix8MTX/y5C2RSPC/AQB61erowbpqSwAAAABJRU5ErkJggg==")}div.vis-network div.vis-manipulation 
button.vis-button.vis-delete{background-image:url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96f
ynQ89kzyaeF/6i/suuFxYvfvjV69fO0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAEEOaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjUtYzAyMSA3OS4xNTQ5MTEsIDIwMTMvMTAvMjktMTE6NDc6MTYgICAgICAgICI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgICAgICAgICAgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIgogICAgICAgICAgICB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIKICAgICAgICAgICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgICAgICAgICAgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiCiAgICAgICAgICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD5BZG9iZSBQaG90b3Nob3AgQ0MgKFdpbmRvd3MpPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx4bXA6Q3JlYXRlRGF0ZT4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC94bXA6Q3JlYXRlRGF0ZT4KICAgICAgICAgPHhtcDpNZXRhZGF0YURhdGU+MjAxNC0wMi0wNFQxNDo0MTowNCswMTowMDwveG1wOk1ldGFkYXRhRGF0ZT4KICAgICAgICAgPHhtcDpNb2RpZnlEYXRlPjIwMTQtMDItMDRUMTQ6NDE6MDQrMDE6MDA8L3htcDpNb2RpZnlEYXRlPgogICAgICAgICA8ZGM6Zm9ybWF0PmltYWdlL3BuZzwvZGM6Zm9ybWF0PgogICAgICAgICA8eG1wTU06SW5zdGFuY2VJRD54bXAuaWlkOjc3NDkzYmUxLTEyZGItOTg0NC1iNDYyLTg2NGVmNGIzMzM3MTwveG1wTU06SW5zdGFuY2VJRD4KICAgICAgICAgPHhtcE1NOkRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwveG1wTU06RG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC94bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDx4bXBNTTpIaXN0b3J5PgogICAgICAgICAgICA8cmRmOlNlcT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+Y3JlYXRlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdEV2dDppbnN0YW5jZUlEPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6d2hlbj4yMDE0LTAxLTIyVDE5OjI0OjUxKzAxOjAwPC9zdEV2dDp3aGVuPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6c29mdHdhcmVBZ2VudD5BZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPnNhdmVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDppbnN0YW5jZUlEPnhtcC5paWQ6RUE2MEEyNEUxOTg0RTMxMUFEQUZFRkU2RUMzMzNFMDM8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDEtMjNUMTk6MTg6MDcrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBDUzYgKFdpbmRvd3MpPC9zdEV2dDpzb2Z0d2FyZUFnZW50PgogICAgICAgICAgICAgICAgICA8c3RFdnQ6Y2hhbmdlZD4vPC9zdEV2dDpjaGFuZ2VkPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+c2F2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0Omluc3RhbmNlSUQ+eG1wLmlpZDowNmE3NWYwMy04MDdhLWUzNGYtYjk1Zi1jZGU2MjM0Mzg4OGY8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTQtMDItMDRUMTQ6NDE6MDQrMDE6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb
2JlIFBob3Rvc2hvcCBDQyAoV2luZG93cyk8L3N0RXZ0OnNvZnR3YXJlQWdlbnQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpjaGFuZ2VkPi88L3N0RXZ0OmNoYW5nZWQ+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5jb252ZXJ0ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+ZnJvbSBhcHBsaWNhdGlvbi92bmQuYWRvYmUucGhvdG9zaG9wIHRvIGltYWdlL3BuZzwvc3RFdnQ6cGFyYW1ldGVycz4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgICAgIDxyZGY6bGkgcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6YWN0aW9uPmRlcml2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnBhcmFtZXRlcnM+Y29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9wbmc8L3N0RXZ0OnBhcmFtZXRlcnM+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5zYXZlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOjc3NDkzYmUxLTEyZGItOTg0NC1iNDYyLTg2NGVmNGIzMzM3MTwvc3RFdnQ6aW5zdGFuY2VJRD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OndoZW4+MjAxNC0wMi0wNFQxNDo0MTowNCswMTowMDwvc3RFdnQ6d2hlbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnNvZnR3YXJlQWdlbnQ+QWRvYmUgUGhvdG9zaG9wIENDIChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmNoYW5nZWQ+Lzwvc3RFdnQ6Y2hhbmdlZD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgIDwvcmRmOlNlcT4KICAgICAgICAgPC94bXBNTTpIaXN0b3J5PgogICAgICAgICA8eG1wTU06RGVyaXZlZEZyb20gcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICA8c3RSZWY6aW5zdGFuY2VJRD54bXAuaWlkOjA2YTc1ZjAzLTgwN2EtZTM0Zi1iOTVmLWNkZTYyMzQzODg4Zjwvc3RSZWY6aW5zdGFuY2VJRD4KICAgICAgICAgICAgPHN0UmVmOmRvY3VtZW50SUQ+eG1wLmRpZDpFQTc2MkY5Njc0ODNFMzExOTQ4QkQxM0UyQkU3OTlBMTwvc3RSZWY6ZG9jdW1lbnRJRD4KICAgICAgICAgICAgPHN0UmVmOm9yaWdpbmFsRG9jdW1lbnRJRD54bXAuZGlkOjczQjYyQUFEOTE4M0UzMTE5NDhCRDEzRTJCRTc5OUExPC9zdFJlZjpvcmlnaW5hbERvY3VtZW50SUQ+CiAgICAgICAgIDwveG1wTU06RGVyaXZlZEZyb20+CiAgICAgICAgIDxwaG90b3Nob3A6Q29sb3JNb2RlPjM8L3Bob3Rvc2hvcDpDb2xvck1vZGU+CiAgICAgICAgIDxwaG90b3Nob3A6SUNDUHJvZmlsZT5zUkdCIElFQzYxOTY2LTIuMTwvcGhvdG9zaG9wOklDQ1Byb2ZpbGU+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjcyMDA5MC8xMDAwMDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+NzIwMDkwLzEwMDAwPC90aWZmOllSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjI0PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgCjw/eHBhY2tldCBlbmQ9InciPz4aYJzYAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAYGSURBVHjalJZ7UJTnFcZ/73m/72PdJY1RbhoQp6lkXRAvmIYxdCUadLVOozPNtGObap1JsKipjiShbdoRbeKEiQHpQK3xj0xa03aamTbaTGyAYV1QGeqFi+JyiZFLAlmESBkWRmS3fyzslGkmnZ5/v/M873Oe75zzvqqoqAibzQaAiKCUAkApRdHIK/NFsx2NR91nOSILADDoJyzNaM4xxbtvPHh0iC+JiYkJ1OHDh4mJiUEpFSXPv/ziPC28TIiXDCOSrAClQDSEpsCwJPIhrEBRQpiSytXlQwDhcBilFPfu3UMVFxdjt9ujFTzfcLBADCoEEAFr1ZbrrNjch2vtEImPBgHob7fTcWE+bVXJNJ/NiFQlEGLvieXHKmYqGB8fRx05cgSbzYaIsPvywV8pKFaA7fGtLTzz61YWpo/xVTHQbufsq5lcez9zWuWhk5mvFwMEg0H0+vXrMU2Tn1wp3CtCiQ5DjGd3A/m/v8IDCZP8r4iNmyRrWx/j/5qktykZpXKzAjVDVxPzGqemptDr1q1jX3NRnIJarcDKK2hgR2ULXRfncv7UYv7xpovhnhiW5Mz+kefeSKO6LJ1A1xzEuk/Ojm4mRibpuZaMZW3OCtRUND60NmiICCIUShisx7a2sLMiQn4s77uEQgIabnqdfHIlgT1/qQeg8vs5dHhdCNB1wYn3RIiC995j26stjAbsNH+YiZJCESnS1Y/XxIXu8r4YIPv/VkVs3CTnTy2ms34xro1+sp9po6sxlTu34ultmsPVvy6is86FCHgO+DDs49zpjufBpCG+seYOC9OHaTidieicb9ouVAhKtouAseI710ma7pLuqwmgYfHqAFt+6WdLoQ/LBl11Lm7VudAa8vb72PCin9TlAWIsGGhLACD+kSAZnusYBii1XQAPYWDllt6ov2lrBkDBR2+6Ofuak2//3M+G/T4wAAPW7fPhKfRTVeqk9qQbFKRmDUTxS3N7QYGYmwzCkqklBGlPDEcTNv+sg9tNCbTXuvBWujE0bHrZj9JE1B/wU1Pm5PwJN6YBS9a2kVvQEcWnrh5GTFD3lxkYkqRMgYQlwVldUvDnen73LHTUuqitdKM0eAr9AFQfd1J/yo2aJn+2sn4Wdn5qEFODJskgBIjx5T0uCrQA08pnIjS9PERDjPnfOKXAMEBECUoGEIHBj+2zkt76UQ6dXheGAev3+cg74Kf6uJPqcicbfuond7cPy4SOiy7+tD9nFvZurx00KOk3CNEC+mE+vjSPBc7IWqgqTaPT60IMcO/xsXGa3HfKjRgRdbl7/KDg0jtubje6aHj7c7J3dgLQ2zoPwwQ91SooOQdAW1VKVMHty0kA5Bb48BycJn/LjWFGbLv4thvvb53kFvjJ+XEdWkPfjQVR/CcNKYgGMc8JWt5Fa2j+MIPPuyI2pa4IoHSkt6vLIuRaQ9q32khzt4GCxtNu6k46GeiIR2lIfDQQsafPzq1LGRGL9Gk9d+vrwewvfHPQOoexQVjxdB/auk/zmaUMdsfz6bVUtIalT7bxveP1ZHh6GPDPYeSzeD69kcpIfxymFWLNrka+ljhBTWkWwz2JiJT84YHnz2iPx0P20PkmRF5i6HYiwZFJsn/YzdezbzE3cQibY5xV266z6RfXohakb+xB9CjanCD9qTbW7Grk4WV38VZm0l6dhQiEw9taHSuDqrS0FIfDwXM3X9mHMsvRAk/sauDpQy38P+GtzOTGB9mEpkD0C2dS8n8zOjqK9ng8WJZFU+JTjasGvaCNXPpvJBPoMlm0OoDNMfWVxONfWNSUPUZ7TUQ56tCZlPwSgMnJSVRpaSmxsbFE1raw82ZxAZZRQUiBYUKGp5UlOX2krBzmoUVjiIKhHge9rfPo+Wcy3ZeXIYASgL1/X5RfMXMvj46OosrLy7HZbGitUUohIuzoem0RofALaOsghgWGjky0MiJTL8b0lOvI8hN1DKXKP0jd3TNTWDgcJhgMoo4ePYrD4Yi+KmaeLlprnrtXFo9h/AAlG1AqE8yFmBrC+jO0bgH9EVpO/1F2Dc5g//OAsbEx/j0Af+USsQynL1UAAAAASUVORK5CYII=")}div.vis-network div.vis-edit-mode div.vis-label,div.vis-network div.vis-manipulation div.vis-label{line-height:25px;margin:0 0 0 23px}div.vis-network div.vis-manipulation div.vis-separator-line{background-color:#bdbdbd;display:inline-block;float:left;height:21px;margin:0 7px 0 15px;width:1px} \ No newline at end of file diff --git 
a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/init_gl.py b/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/init_gl.py deleted file mode 100644 index 1d2c7e6ba0be20136b2be2e2f644894bee4af9c1..0000000000000000000000000000000000000000 --- a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/init_gl.py +++ /dev/null @@ -1,24 +0,0 @@ -_glut_window = None -_context_inited = None - -def initialize_GL_context(width=512, height=512, egl=False): - ''' - default context uses GLUT - ''' - if not egl: - import OpenGL.GLUT as GLUT - display_mode = GLUT.GLUT_DOUBLE | GLUT.GLUT_RGB | GLUT.GLUT_DEPTH - global _glut_window - if _glut_window is None: - GLUT.glutInit() - GLUT.glutInitDisplayMode(display_mode) - GLUT.glutInitWindowSize(width, height) - GLUT.glutInitWindowPosition(0, 0) - _glut_window = GLUT.glutCreateWindow("My Render.") - else: - from .glcontext import create_opengl_context - global _context_inited - if _context_inited is None: - create_opengl_context((width, height)) - _context_inited = True - diff --git a/spaces/nateraw/lavila/datasets/README.md b/spaces/nateraw/lavila/datasets/README.md deleted file mode 100644 index 966bb4120d2ba0a2c2d4a5fb1468cd6ce80d6f7d..0000000000000000000000000000000000000000 --- a/spaces/nateraw/lavila/datasets/README.md +++ /dev/null @@ -1,153 +0,0 @@ -# Preparing datasets for LAVILA - -Please download the (selected) datasets from the official websites and place or sim-link them under `$LAVILA_ROOT/datasets/`. - -```bash -$LAVILA_ROOT/datasets/ - CharadesEgo/ - EGTEA/ - EK100/ - Ego4D/ -``` - -## Ego4D -1. Download [Ego4D videos](https://ego4d-data.org/docs/start-here/#download-data) (license is required). - -2. Preprocess(TBA) - -3. Download annotations - - a. Download [egomcq.json](https://drive.google.com/file/d/1-5iRYf4BCHmj4MYQYFRMY4bhsWJUN3rW/view) to `$LAVILA_ROOT/datasets/Ego4D` (if you want to evaluate EgoMCQ). - - b. Download [metadata for train split](https://dl.fbaipublicfiles.com/lavila/metadata/ego4d/ego4d_train.pkl) and [val split](https://dl.fbaipublicfiles.com/lavila/metadata/ego4d/ego4d_val.pkl) to `$LAVILA_ROOT/datasets/Ego4D` ((if you want to train LAVILA from scratch). - -The fold should look like this: -```bash -$LAVILA_ROOT/datasets/ - Ego4D/ - ego4d_train.pkl - ego4d_val.pkl - egomcq.json - video_288px/ - 000786a7-3f9d-4fe6-bfb3-045b368f7d44.mp4/ - 0.mp4 - 300.mp4 - 000a3525-6c98-4650-aaab-be7d2c7b9402.mp4/ - 0.mp4 - ... -``` - - -## EPIC-Kitchens-100 (EK-100) - -1. Download annotations - -```bash -# Assume that you are under `datasets/EK100/` -git clone https://github.com/epic-kitchens/epic-kitchens-100-annotations -``` - -2. Download videos. - - a. For raw videos, please download them from [https://epic-kitchens.github.io/](https://epic-kitchens.github.io/). - - b. (Recommended) The raw videos are huge (~1 TB). As an alternative, please check out a [resized version](). - -3. (For EK-100 MIR) - - a. Generate the relevancy matrix of train/val splits using [the official code](https://github.com/mwray/Joint-Part-of-Speech-Embeddings). - - b. (Recommended) The generated result has some randomness. Therefore, we also provide the [replica of train split](https://dl.fbaipublicfiles.com/lavila/metadata/EK100/caption_relevancy_EPIC_100_retrieval_train.pkl) and [val split](https://dl.fbaipublicfiles.com/lavila/metadata/EK100/caption_relevancy_EPIC_100_retrieval_test.pkl). Please put them to the folder `$LAVILA_ROOT/datasets/EK100/epic-kitchens-100-annotations/retrieval_annotations/relevancy/`. 
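For example, a minimal sketch of fetching the provided replicas into that folder (assuming `wget` is available and `$LAVILA_ROOT` is set):

```bash
# Fetch the precomputed relevancy matrices for EK-100 MIR (train/test splits)
mkdir -p $LAVILA_ROOT/datasets/EK100/epic-kitchens-100-annotations/retrieval_annotations/relevancy/
cd $LAVILA_ROOT/datasets/EK100/epic-kitchens-100-annotations/retrieval_annotations/relevancy/
wget https://dl.fbaipublicfiles.com/lavila/metadata/EK100/caption_relevancy_EPIC_100_retrieval_train.pkl
wget https://dl.fbaipublicfiles.com/lavila/metadata/EK100/caption_relevancy_EPIC_100_retrieval_test.pkl
```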
- - -The folder should look like this: -```bash -$LAVILA_ROOT/datasets/ - EK100/ - epic-kitchens-100-annotations/ - EPIC_100_train.csv - EPIC_100_validation.csv - ... - retrieval_annotations/relevancy/ # this appears if you do 3. - caption_relevancy_EPIC_100_retrieval_train.pkl - caption_relevancy_EPIC_100_retrieval_test.pkl - video_ht256px/ - P01/ - P01_01.MP4 - P01_02.MP4 - ... - P01_19.MP4 - P02/ - P02_01.MP4 - P02_02.MP4 - ... - P02_15.MP4 - ... -``` - -## CharadesEgo - -1. Download annotations at [https://prior.allenai.org/projects/charades-ego](https://prior.allenai.org/projects/charades-ego). -```bash -### Annotations -# Assume that you are under `datasets/CharadesEgo/` -wget https://ai2-public-datasets.s3-us-west-2.amazonaws.com/charades/CharadesEgo.zip -unzip CharadesEgo.zip && rm CharadesEgo.zip -``` - -2. Download data (~11GB) at [https://prior.allenai.org/projects/charades-ego](https://prior.allenai.org/projects/charades-ego). -```bash -### Data -wget https://ai2-public-datasets.s3-us-west-2.amazonaws.com/charades/CharadesEgo_v1_480.tar -tar -xvf CharadesEgo_v1_480.tar # Or specify an external path using `-C` and sim-link it to here -rm CharadesEgo_v1_480.tar -``` - -3. (For fine-tuning CharadesEgo) Download two additional metadata files: [clip-level metadata (train)](https://dl.fbaipublicfiles.com/lavila/metadata/CharadesEgo/metadata_filtered_train.pkl) and [clip-level metadata (val)](https://dl.fbaipublicfiles.com/lavila/metadata/CharadesEgo/metadata_filtered_val.pkl). Put them to the folder `$LAVILA_ROOT/datasets/CharadesEgo/CharadesEgo/`. - -The folder should look like this: -```bash -$LAVILA_ROOT/datasets/ - CharadesEgo/ - CharadesEgo/ - CharadesEgo_v1_train_only1st.csv - CharadesEgo_v1_test_only1st.csv - ... - metadata_filtered_train.pkl # this appears if you do 3. - metadata_filtered_val.pkl # this appears if you do 3. - CharadesEgo_v1_480/ - 005BU.mp4 - 005BUEGO.mp4 - ... -``` - - -## EGTEA - -1. Visit [https://cbs.ic.gatech.edu/fpv/](https://cbs.ic.gatech.edu/fpv/). - -2. Download `TRIMMED_ACTION_CLIPS` (~20GB) and `ACTION_ANNOTATIONS` and untar to the current folder `$LAVILA_ROOT/datasets/EGTEA`. - -```bash -unzip action_annotation.zip -d EGTEA/ && rm action_annotation.zip -``` - -The folder should look like this: -```bash -$LAVILA_ROOT/datasets/ - EGTEA/ - train_split1.txt - test_split1.txt - cropped_clips/ - OP01-R01-PastaSalad/ - OP01-R01-PastaSalad-1002316-1004005-F024051-F024101.mp4 - OP01-R01-PastaSalad-1004110-1021110-F024057-F024548.mp4 - OP01-R01-PastaSalad-1022590-1024050-F024539-F024581.mp4 - ... - OP01-R02-TurkeySandwich/ - OP01-R02-TurkeySandwich-102320-105110-F002449-F002529.mp4 - OP01-R02-TurkeySandwich-105440-106460-F002528-F002558.mp4 - OP01-R02-TurkeySandwich-107332-133184-F002513-F003259.mp4 - ... - ... -``` diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/3planesoft Screensaver Manager Serial LINK Keygen.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/3planesoft Screensaver Manager Serial LINK Keygen.md deleted file mode 100644 index 72690005ba53aec1f3ac85d3ea5fd050d43a89e2..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/3planesoft Screensaver Manager Serial LINK Keygen.md +++ /dev/null @@ -1,32 +0,0 @@ -
      -Here is what I created: - -

      How to Activate 3Planesoft Screensaver Manager with Serial Keygen

      -

      If you are looking for a way to enjoy various 3D screensavers on your PC, you might want to try 3Planesoft Screensaver Manager. This software allows you to easily manage and switch between different 3Planesoft screensavers, as well as customize their settings and appearance. However, to use this software, you need to activate it with a valid serial keygen.

      -

      A serial keygen is a program that generates unique serial numbers or activation codes for a specific software. You can use a serial keygen to activate 3Planesoft Screensaver Manager and unlock its full features. However, you should be careful when downloading and using serial keygens from the internet, as some of them might contain malware or viruses that can harm your computer.

      -

      3planesoft screensaver manager serial keygen


      Download Zip 🗸🗸🗸 https://urlcod.com/2uIaa2



      -

      In this article, we will show you how to find and use a safe and reliable serial keygen for 3Planesoft Screensaver Manager. Follow these steps:

      -
        -
      1. Download 3Planesoft Screensaver Manager from the official website: https://www.3planesoft.com/screensaver-manager/. Install and run the software on your PC.
      2. -
      3. Go to https://www.serials.ws/, a trusted website that provides serial numbers and keygens for various software. In the search box, type "3Planesoft Screensaver Manager" and click on the search button.
      4. -
      5. You will see a list of results with different versions and serial numbers for 3Planesoft Screensaver Manager. Choose the one that matches your software version and copy the serial number.
      6. -
      7. Go back to 3Planesoft Screensaver Manager and click on the "Register" button at the top right corner of the main window. Paste the serial number in the registration box and click on the "OK" button.
      8. -
      9. You will see a confirmation message that says "Thank you for registering 3Planesoft Screensaver Manager". You have successfully activated the software and can now enjoy all its features and functions.
      10. -
      -

      Congratulations! You have learned how to activate 3Planesoft Screensaver Manager with serial keygen. Now you can explore and customize various 3D screensavers on your PC. Have fun!

      - -

      Why Use 3Planesoft Screensaver Manager?

      -

      3Planesoft Screensaver Manager is not just a simple tool to manage your screensavers. It is also a gateway to a collection of stunning 3D screensavers that will transform your PC into a virtual reality. 3Planesoft is an independent screensaver developing company that specializes in 3D screensavers. They have released over 100 titles and are currently the most popular 3D screensaver maker on the internet[^1^].

      -

      -

      Some of their best-selling screensavers include Earth 3D Screensaver, which turns your monitor into a realistic space shuttle window; Coral Reef 3D Screensaver, which reveals the secrets of the underwater world; and Winter Village 3D Screensaver, which shows a cozy village in the capricious winter weather. You can download these and many more screensavers from their official website: https://www.3planesoft.com/.

      -

      With 3Planesoft Screensaver Manager, you can easily access and switch between all these screensavers from one convenient interface. You can also customize their settings and appearance, such as resolution, sound volume, brightness, and color. You can even create your own playlists and schedule different screensavers for different times of the day or week.

      -

      What Are the Benefits of 3Planesoft Screensaver Manager?

      -

      Using 3Planesoft Screensaver Manager has many benefits for your PC and yourself. Here are some of them:

      -
        -
      • It protects your monitor from burn-in and prolongs its lifespan. Burn-in is a phenomenon where a static image gets permanently imprinted on the screen due to prolonged exposure. By using dynamic and changing screensavers, you can prevent this from happening and keep your monitor in good condition.
      • -
      • It enhances your mood and productivity. Studies have shown that watching relaxing and beautiful scenes can reduce stress and improve mental health. By using 3Planesoft screensavers, you can enjoy various sceneries and themes that will calm your mind and inspire your creativity.
      • -
      • It entertains you and your guests. 3Planesoft screensavers are not just for your eyes only. They are also great for showing off to your friends and family. You can impress them with the realistic and immersive graphics, sounds, and animations of your screensavers. You can also use them as a conversation starter or a background for parties and events.
      • -
      -

      As you can see, 3Planesoft Screensaver Manager is more than just a software. It is a way to enhance your PC experience and enjoy various 3D screensavers that will suit your taste and mood.

      cec2833e83
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microsoft Toolkit 2.4.5 Crack.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microsoft Toolkit 2.4.5 Crack.md deleted file mode 100644 index fc1cf8e0de5187d67bca5448756d5c5e1fc473f8..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microsoft Toolkit 2.4.5 Crack.md +++ /dev/null @@ -1,44 +0,0 @@ -
      -

      What is Microsoft Toolkit 2.4.5 and how to use it?

      -

      Microsoft Toolkit 2.4.5 is a free tool that can help you activate Windows and Microsoft Office products using the Key Management Service (KMS) technology. It can also manage and customize some settings and features of these products.

      -

      Microsoft Toolkit 2.4.5 crack


      Download Filehttps://urlcod.com/2uIcsD



      -

      KMS is a method of activation that allows you to use Microsoft products without purchasing a license key. It works by connecting to a KMS server that provides a valid activation for a certain period of time, usually 180 days. You need to renew the activation periodically by connecting to the same or another KMS server.

      -

      Microsoft Toolkit 2.4.5 can help you find and connect to a KMS server automatically or manually. It can also create a virtual KMS server on your computer and activate your products locally. You can also check the activation status and backup or restore your activation data.

      -

      To use Microsoft Toolkit 2.4.5, you need to download it from a reliable source and run it as an administrator. You will see a user interface with two tabs: Main and Activation. On the Main tab, you can select the product you want to activate, such as Windows or Office, and click on the EZ-Activator button. This will start the activation process and show you the progress and results.

      -

      On the Activation tab, you can see more options and details about the activation process. You can choose the activation method, such as AutoKMS or AutoRearm, and change some settings, such as the KMS server address, port, service name, etc. You can also check the activation status of your products and view or save the log files.

      -

      -

      Microsoft Toolkit 2.4.5 is a useful tool for activating Windows and Office products without buying a license key. However, you should be aware that using it may violate Microsoft's terms of service and may not be legal in some countries. You should also scan it for viruses before using it and use it at your own risk.

      - -

      How to activate Windows with Microsoft Toolkit 2.4.5?

      -

      Microsoft Toolkit 2.4.5 can also help you activate Windows products using the same KMS technology. You can activate Windows Vista, 7, 8, 8.1 and 10 with this tool.

      -

      To activate Windows with Microsoft Toolkit 2.4.5, you need to follow these steps:

      -
        -
      1. Open Microsoft Toolkit 2.4.5 as an administrator.
      2. -
      3. Select the Windows logo on the Main tab.
      4. -
      5. Click on the EZ-Activator button on the Activation tab.
      6. -
      7. Wait for the activation process to complete and check the results.
      8. -
      -

      You can also customize some settings and features of Windows using the Settings tab. For example, you can change the product key, enable or disable auto-rearm, create or delete backup, etc.

      - -

      What are the advantages and disadvantages of using Microsoft Toolkit 2.4.5?

      -

      Microsoft Toolkit 2.4.5 has some advantages and disadvantages that you should consider before using it.

      -

      Some of the advantages are:

      -
        -
      • It is free and easy to use.
      • -
      • It can activate multiple products with one tool.
      • -
      • It can work offline and online.
      • -
      • It can backup and restore your activation data.
      • -
      -

      Some of the disadvantages are:

      -
        -
      • It may not be legal and ethical to use it.
      • -
      • It may not be compatible with some antivirus programs.
      • -
      • It may not work with some updates or versions of Windows and Office.
      • -
      • It may cause some errors or problems on your system.
      • -
      - -

      Where can you download Microsoft Toolkit 2.4.5?

      -

      Microsoft Toolkit 2.4.5 is not an official product of Microsoft, so you cannot download it from their website or store. You need to find a reliable source that offers a clean and safe download link for this tool.

      -

      One of the possible sources is microsoft-toolkit.software.informer.com, where you can download Microsoft Toolkit 2.4.5 for free and without registration. However, you should always scan any file that you download from the internet for viruses and malware before opening it.

      81aa517590
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Songs Of New Hindi Movie Queen Snoop Suite Materiau [PATCHED].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Songs Of New Hindi Movie Queen Snoop Suite Materiau [PATCHED].md deleted file mode 100644 index fe8192b696334255a0cf935416cab697519703a9..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Songs Of New Hindi Movie Queen Snoop Suite Materiau [PATCHED].md +++ /dev/null @@ -1,24 +0,0 @@ -
      -

      Songs Of New Hindi Movie Queen: A Musical Journey With Kangana Ranaut

      -

      Queen is a 2014 Hindi comedy-drama film starring Kangana Ranaut as Rani, a shy and naive girl who embarks on a solo honeymoon trip after being dumped by her fiance. The film features a soundtrack composed by Amit Trivedi, who blends various genres and styles to create a musical journey that reflects Rani's transformation and growth. Here are some of the songs of the new Hindi movie Queen that you can enjoy.

      -

      Songs Of New Hindi Movie Queen snoop suite materiau


      DOWNLOAD ……… https://urlcod.com/2uI9QK



      -
        -
      • London Thumakda: This is the opening song of the film, where Rani and her family are preparing for her wedding. The song is a lively and upbeat Punjabi number that captures the festive mood and the excitement of the bride-to-be. The song is sung by Labh Janjua, Sonu Kakkar and Neha Kakkar.
      • -
      • Badra Bahaar: This is a melancholic and soulful song that plays when Rani is heartbroken and decides to go on her honeymoon alone. The song is a fusion of rock and classical music, with Amit Trivedi's vocals and lyrics expressing Rani's pain and loneliness.
      • -
      • O Gujariya: This is a peppy and energetic song that plays when Rani meets Vijayalakshmi (Lisa Haydon), a free-spirited and adventurous girl who becomes her friend in Paris. The song is a mix of Hindi and English lyrics, with Shefali Alvares and Nikhil D'Souza singing about living life to the fullest.
      • -
      • Taake Jhanke: This is a romantic and playful song that plays when Rani starts to develop feelings for Aakash (Vikas Bahl), a musician who helps her rediscover herself in Amsterdam. The song is a blend of folk and pop music, with Arijit Singh and Nandini Srikar singing about the sparks flying between the two.
      • -
      • Harjaiyaan: This is a beautiful and emotional song that plays when Rani realizes that she has fallen in love with Aakash, but also has to face the reality of her situation. The song is a soft and soothing melody, with Nandini Srikar's vocals and lyrics conveying Rani's dilemma and dilemma.
      • -
      • Kinare: This is the closing song of the film, where Rani returns to India as a confident and independent woman who has found herself. The song is a motivational and inspirational anthem, with Mohan Kanan's vocals and lyrics celebrating Rani's journey and achievements.
      • -
      -

      These are some of the songs of the new Hindi movie Queen that you can listen to and enjoy. The film is a heartwarming and humorous story of self-discovery and empowerment, with Kangana Ranaut delivering a stellar performance as Rani. The soundtrack of the film is equally impressive, with Amit Trivedi creating a musical masterpiece that complements the film's theme and mood.

      If you want to know more about the film Queen and its songs, here are some interesting facts and trivia that you might find fascinating.

      -
        -
      • The film Queen was inspired by the real-life story of Anurag Kashyap's ex-girlfriend, who went on a solo trip to Europe after breaking up with him.
      • -
      • The film Queen was shot in various locations across Paris, Amsterdam and Delhi, with the crew using hidden cameras and real people to capture the authentic feel of the places.
      • -
      • The film Queen was a critical and commercial success, winning several awards and accolades, including the National Film Award for Best Feature Film in Hindi and the Filmfare Award for Best Film.
      • -
      • The film Queen was praised for its feminist and progressive message, as well as its portrayal of female friendship and empowerment.
      • -
      • The soundtrack of the film Queen was also a hit, with songs like London Thumakda and O Gujariya becoming popular among the audiences. The soundtrack also featured a remix version of Hungama Ho Gaya, a classic song from the 1973 film Anhonee.
      • -
      • The film Queen marked the debut of Vikas Bahl as a director, who later went on to direct films like Shaandaar and Super 30.
      • -
      -

      These are some of the facts and trivia about the film Queen and its songs that you might find interesting. The film Queen is a must-watch for anyone who loves comedy-drama films with a strong female protagonist and a catchy soundtrack. The film Queen is available on various streaming platforms like Netflix and Amazon Prime Video.

      cec2833e83
      -
      -
      \ No newline at end of file diff --git a/spaces/nickil/weakly-supervised-parsing/weakly_supervised_parser/tree/__init__.py b/spaces/nickil/weakly-supervised-parsing/weakly_supervised_parser/tree/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/nightfury/whisperAI/app.py b/spaces/nightfury/whisperAI/app.py deleted file mode 100644 index 79ea0ece4462d08cf18bf4ffa44fe85d70f85119..0000000000000000000000000000000000000000 --- a/spaces/nightfury/whisperAI/app.py +++ /dev/null @@ -1,158 +0,0 @@ -import gradio as gr -#from pyChatGPT import ChatGPT -import os -import requests -api = os.environ.get('API_ENDPOINT') -#session_token = os.environ.get('SessionToken') -#cf_clearance_token = os.environ.get('ClearanceToken') -#cf_bm_token = os.environ.get('cf_bm_token') -whisper = gr.Interface.load(name="spaces/sanchit-gandhi/whisper-large-v2") - -def call_api(message): - response = requests.get(f'{api}?q={message}') - if response.status_code == 200: - - return str(response.text).split('\n', 2)[2] - else: - return """Sorry, I'm quite busy right now, but please try again later :)""" - -def chat_hf(audio, task): - - try: - whisper_text = translate(audio, task) - if whisper_text == "ERROR: You have to either use the microphone or upload an audio file": - gpt_response = "MISSING AUDIO: Record your voice by clicking the microphone button, do not forget to stop recording before sending your message ;)" - else: - gpt_response = call_api(whisper_text) - #api = ChatGPT(session_token, cf_clearance_token, cf_bm_token) - #api = ChatGPT(session_token) - #api.refresh_auth() # refresh the authorization token - #if reset_conversation: - # - # api.reset_conversation() # reset the conversation - #resp = api.send_message(whisper_text) - #gpt_response = resp['message'] - - except: - - - gpt_response = """Sorry, I'm quite busy right now, but please try again later :)""" - - print(f""" - {whisper_text} - ———— - {gpt_response} - """) - - return whisper_text, gpt_response - - -def translate(audio, task): - - if task == "transcribe": - text_result = whisper(audio, None, "transcribe", fn_index=0) - else: - text_result = whisper(audio, None, "translate", fn_index=0) - - return text_result - -title = """ -
      -
      -

      - Whisper-to-chatGPT -

      -
      -

      - Chat with GPT with your voice in your native language ! - - -

      -""" - -article = """ -

      Note: this demo is not able to sustain a conversation from earlier responses. - For more detailed results and dialogue, you should use the official ChatGPT interface. -
      — -
      Also, be aware that audio records from iOS devices will not be decoded as expected by Gradio. For the best experience, record your voice from a computer instead of your smartphone ;)

      - -""" - -css = ''' - #col-container, #col-container-2 {max-width: 510px; margin-left: auto; margin-right: auto;} - a {text-decoration-line: underline; font-weight: 600;} - div#record_btn > .mt-6 { - margin-top: 0!important; - } - div#record_btn > .mt-6 button { - width: 100%; - height: 40px; - } - .footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } -''' - - - -with gr.Blocks(css=css) as demo: - - with gr.Column(elem_id="col-container"): - - gr.HTML(title) - - with gr.Row(): - record_input = gr.Audio(source="microphone",type="filepath", show_label=False,elem_id="record_btn") - task = gr.Radio(choices=["transcribe","translate"], value="transcribe", show_label=False) - - with gr.Row(): - #reset_conversation = gr.Checkbox(label="Reset conversation?", value=False) - send_btn = gr.Button("Send my request !") - #custom_token = gr.Textbox(label='If it fails, use your own session token', placeholder="your own session token", max_lines=3) - - with gr.Column(elem_id="col-container-2"): - audio_translation = gr.Textbox(type="text",label="Whisper transcript") - gpt_response = gr.Textbox(type="text",label="chatGPT response") - - gr.HTML(article) - - send_btn.click(chat_hf, inputs=[record_input, task], outputs=[audio_translation, gpt_response]) - -demo.queue(max_size=32, concurrency_count=20).launch(debug=True) diff --git a/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/plugin_014.js b/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/plugin_014.js deleted file mode 100644 index c12fc62c00a35ca5259d844453434c0f4920aeb9..0000000000000000000000000000000000000000 --- a/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/plugin_014.js +++ /dev/null @@ -1,144 +0,0 @@ -(function ($) { - CKEDITOR.plugins.add("codesnippet", { - requires: "widget,dialog,contextmenu", - icons: "codesnippet", - - onLoad: function () { - CKEDITOR.dialog.add("codeSnippet", this.path + "dialogs/codesnippet.js"); - }, - - init: function (editor) { - editor.ui.addButton && - editor.ui.addButton("codesnippet", { - label: "Codesnippet", - command: "codeSnippet", - toolbar: "insert,99", - group: "44", - icon: this.path + "icons/codesnippet.png", - }); - - editor.addCommand("removewidget", createDef({ - exec: function (editor) { - if (editor.widgets.focused) { - let element = editor.widgets.focused; - let range = editor.createRange(); - range.moveToPosition( element.element, CKEDITOR.POSITION_BEFORE_START ); - range.select(); - element.destroy(); - element.element.remove(); - } - }, - })); - - editor.addMenuGroup("codesnippet"); - editor.addMenuItems({ - myCommand: { - label: "Delete Snippet", - command: "removewidget", - group: "codesnippet", - order: 1 - }, - }); - - if (editor.contextMenu) - editor.contextMenu.addListener(() => {return { myCommand: CKEDITOR.TRISTATE_OFF } }); - }, - - afterInit: function (editor) { - registerWidget(editor, this.path); - }, - }); - - function createDef( def ) { - return CKEDITOR.tools.extend( def || {}, { - contextSensitive: 1, - refresh: function( editor, path ) { - this.setState((editor.widgets.focused) ? 
CKEDITOR.TRISTATE_OFF : CKEDITOR.TRISTATE_DISABLED); - } - }); - } - - function registerWidget(editor, path) { - var codeClass = editor.config.codeSnippet_codeClass; - - editor.config.shiftEnterMode = CKEDITOR.ENTER_P; - editor.settings = CKEDITOR.tools.extend( - { - cls: "", - modes: [ - //[View for pre, type for aceEditor, type for highlightjs] - ["ObjectScript", "text", "cls"], - ["SQL", "sql", "sql"], - ["Python", "python", "python"], - ["JSON", "json", "json"], - ["YAML", "yaml", "yaml"], - ["JavaScript", "javascript", "javascript"], - ["Dockerfile", "dockerfile", "dockerfile"], - ["HTML", "html", "html"], - ["CSS", "css", "css"], - ["Java", "java", "java"], - ["XML", "xml", "xml"], - ["Bash", "bash", "bash"], - ["Shell Session", "shell", "shell"], - ["PHP", "php", "php"], - ], - tab_size: 4, - js: "//cdnjs.cloudflare.com/ajax/libs/ace/1.2.6/", - }, - editor.config.codeSnippet, - true - ); - - CKEDITOR.scriptLoader.load([ - (editor.settings.js + "ace.js"), - "/sites/all/libraries/highlightjs/highlight.pack.min.js", - ]); - - editor.addContentsCss(`/sites/all/libraries/highlightjs/styles/${editor.config.codeSnippet_theme}.css`); - editor.addContentsCss(path + "dialogs/style.css"); - - editor.on( - "instanceReady", - function () { - CKEDITOR.document.appendStyleSheet(path + "dialogs/style.css"); - CKEDITOR.scriptLoader.load([(editor.settings.js + "ext-whitespace.js")]); - }.bind(this) - ); - - editor.widgets.add("codeSnippet", { - allowedContent: "pre; code(language-*); p;", - requiredContent: "pre", - styleableElements: "pre", - template: `
      `, - dialog: "codeSnippet", - pathName: "codesnippet", - mask: true, - - parts: { - pre: "pre", - code: "code" - }, - - upcast: function (element, data) { - if(element.hasClass("codeblock-container")) data.lang = element.attributes.idlang; - data.tabsize = (element.attributes.tabsize) ? element.attributes.tabsize : 4; - return element.hasClass("codeblock-container"); - }, - - data: function () { - var data = this.data; - - if(data.code === undefined) data.code = this.parts.code.getText(); - - if (data.lang !== undefined) { - this.parts.code.setHtml(CKEDITOR.tools.htmlEncode(data.code)); - this.parts.code.$.className = `language-${editor.settings.modes[data.lang][2]} ${editor.config.codeSnippet_codeClass}`; - hljs.highlightBlock(this.parts.code.$, editor.settings.modes[data.lang][2]); - } - }, - }); - } -})(jQuery); - -CKEDITOR.config.codeSnippet_codeClass = "hljs"; -CKEDITOR.config.codeSnippet_theme = "idea"; diff --git a/spaces/oguzakif/video-object-remover/SiamMask/data/ytb_vos/readme.md b/spaces/oguzakif/video-object-remover/SiamMask/data/ytb_vos/readme.md deleted file mode 100644 index a924f8cc6ae1fd293849c9a236c74038df87cc95..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/data/ytb_vos/readme.md +++ /dev/null @@ -1,17 +0,0 @@ -# Preprocessing [Youtube-VOS](https://youtube-vos.org/dataset/download) - -### Download raw images and annotations ([website](https://youtube-vos.org/dataset/download), 8.3G) - -````shell -python download_from_gdrive.py https://drive.google.com/uc?id=18S_db1cFgSD1RsMsofJLkd6SyR9opk6a --output train.zip -unzip ./train.zip -python parse_ytb_vos.py # really slow -```` - -### Crop & Generate data info (10 min) - -````shell -#python par_crop.py [crop_size] [num_threads] -python par_crop.py 511 12 -python gen_json.py -```` diff --git a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_sharp/test_all.sh b/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_sharp/test_all.sh deleted file mode 100644 index 4f7e83f18591a72b2afd66961c9e91f31fc61e92..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_sharp/test_all.sh +++ /dev/null @@ -1,83 +0,0 @@ -show_help() { -cat << EOF -Usage: - ${0##*/} [-h/--help] [-s/--start] [-e/--end] [-d/--dataset] [-m/--model] [-g/--gpu] - e.g. - bash ${0##*/} -s 1 -e 20 -d VOT2018 -g 4 # for test models - bash ${0##*/} -m snapshot/checkpoint_e10.pth -n 8 -g 4 # for tune models -EOF -} - -ROOT=`git rev-parse --show-toplevel` -source activate siammask -export PYTHONPATH=$ROOT:$PYTHONPATH -export PYTHONPATH=$PWD:$PYTHONPATH - -dataset=VOT2018 -NUM=4 -START=1 -END=20 -GPU=0 - -while [[ $# -gt 0 ]] -do - key="$1" - case $key in - -h|--help) - show_help - exit - ;; - -d|--dataset) - dataset=$2 - shift 2 - ;; - -n|--num) - NUM=$2 - shift 2 - ;; - -s|--start) - START=$2 - shift 2 - ;; - -e|--end) - END=$2 - shift 2 - ;; - -m|--model) - model=$2 - shift 2 - ;; - -g|--gpu) - GPU=$2 - shift 2 - ;; - *) - echo invalid arg [$1] - show_help - exit 1 - ;; - esac -done - -set -e - -if [ -z "$model" ]; then - echo test snapshot $START ~ $END on dataset $dataset with $GPU gpus. 
- for i in $(seq $START $END) - do - bash test_mask_refine.sh config_vot18.json snapshot/checkpoint_e$i.pth $dataset $(($i % $GPU)) & - done - wait - - python $ROOT/tools/eval.py --dataset $dataset --num 20 --tracker_prefix C --result_dir ./test/$dataset 2>&1 | tee logs/eval_test_$dataset.log -else - echo tuning $model on dataset $dataset with $NUM jobs in $GPU gpus. - for i in $(seq 1 $NUM) - do - bash tune.sh $model $dataset $(($i % $GPU)) & - done - wait - rm finish.flag - - python $ROOT/tools/eval.py --dataset $dataset --num 20 --tracker_prefix C --result_dir ./result/$dataset 2>&1 | tee logs/eval_tune_$dataset.log -fi diff --git a/spaces/oncetalk/syzymon-long_llama_3b/README.md b/spaces/oncetalk/syzymon-long_llama_3b/README.md deleted file mode 100644 index a6d77679d8bb9c3af7f966ed6d91461d156cde39..0000000000000000000000000000000000000000 --- a/spaces/oncetalk/syzymon-long_llama_3b/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Syzymon-long Llama 3b -emoji: 🐢 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/onereal/Voice-Cloning-for-you/Makefile b/spaces/onereal/Voice-Cloning-for-you/Makefile deleted file mode 100644 index ad23323414bd2175956f6aef92f223a02f7258be..0000000000000000000000000000000000000000 --- a/spaces/onereal/Voice-Cloning-for-you/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: quality style - -# Check that source code meets quality standards -quality: - black --check --diff . - ruff . - -# Format source code automatically -style: - black . - ruff . --fix diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/text2img.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/text2img.md deleted file mode 100644 index 8d09602d860554f847f2936fe2198deb871c7382..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/text2img.md +++ /dev/null @@ -1,59 +0,0 @@ - - -# Text-to-image - -The Stable Diffusion model was created by researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), [Runway](https://github.com/runwayml), and [LAION](https://laion.ai/). The [`StableDiffusionPipeline`] is capable of generating photorealistic images given any text input. It's trained on 512x512 images from a subset of the LAION-5B dataset. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on consumer GPUs. Latent diffusion is the research on top of which Stable Diffusion was built. It was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer. - -The abstract from the paper is: - -*By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. 
However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve a new state of the art for image inpainting and highly competitive performance on various tasks, including unconditional image generation, semantic scene synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs. Code is available at https://github.com/CompVis/latent-diffusion.* - - - -Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! - -If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! - - - -## StableDiffusionPipeline - -[[autodoc]] StableDiffusionPipeline - - all - - __call__ - - enable_attention_slicing - - disable_attention_slicing - - enable_vae_slicing - - disable_vae_slicing - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - - enable_vae_tiling - - disable_vae_tiling - - load_textual_inversion - - from_single_file - - load_lora_weights - - save_lora_weights - -## StableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput - -## FlaxStableDiffusionPipeline - -[[autodoc]] FlaxStableDiffusionPipeline - - all - - __call__ - -## FlaxStableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/seed_resize_stable_diffusion.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/seed_resize_stable_diffusion.py deleted file mode 100644 index 5891b9fb11a83ad2706232ff53999e7c110821f9..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/seed_resize_stable_diffusion.py +++ /dev/null @@ -1,366 +0,0 @@ -""" - modified based on diffusion library from Huggingface: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py -""" -import inspect -from typing import Callable, List, Optional, Union - -import torch -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer - -from diffusers import DiffusionPipeline -from diffusers.models import AutoencoderKL, UNet2DConditionModel -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from diffusers.pipelines.stable_diffusion.safety_checker import 
StableDiffusionSafetyChecker -from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from diffusers.utils import logging - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class SeedResizeStableDiffusionPipeline(DiffusionPipeline): - r""" - Pipeline for text-to-image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. - feature_extractor ([`CLIPImageProcessor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - ): - super().__init__() - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, - `attention_head_dim` must be a multiple of `slice_size`. - """ - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = self.unet.config.attention_head_dim // 2 - self.unet.set_attention_slice(slice_size) - - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. 
- """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - height: int = 512, - width: int = 512, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - text_embeddings: Optional[torch.FloatTensor] = None, - **kwargs, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - height (`int`, *optional*, defaults to 512): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 512): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. 
If not specified, the callback will be - called at every step. - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - - if text_input_ids.shape[-1] > self.tokenizer.model_max_length: - removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] - - if text_embeddings is None: - text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] - - # duplicate text embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = text_embeddings.shape - text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) - text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." 
- ) - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = uncond_embeddings.shape[1] - uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1) - uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - # get the initial random noise unless the user supplied it - - # Unlike in other pipelines, latents need to be generated in the target device - # for 1-to-1 results reproducibility with the CompVis implementation. - # However this currently doesn't work in `mps`. - latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) - latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) - latents_dtype = text_embeddings.dtype - if latents is None: - if self.device.type == "mps": - # randn does not exist on mps - latents_reference = torch.randn( - latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype - ).to(self.device) - latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( - self.device - ) - else: - latents_reference = torch.randn( - latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype - ) - latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) - else: - if latents_reference.shape != latents_shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") - latents_reference = latents_reference.to(self.device) - latents = latents.to(self.device) - - # This is the key part of the pipeline where we - # try to ensure that the generated images w/ the same seed - # but different sizes actually result in similar images - dx = (latents_shape[3] - latents_shape_reference[3]) // 2 - dy = (latents_shape[2] - latents_shape_reference[2]) // 2 - w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx - h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy - tx = 0 if dx < 0 else dx - ty = 0 if dy < 0 else dy - dx = max(-dx, 0) - dy = max(-dy, 0) - # import pdb - # pdb.set_trace() - latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w] - - # set timesteps - self.scheduler.set_timesteps(num_inference_steps) - - # Some schedulers like PNDM have timesteps as arrays - # It's more optimized to move all timesteps to correct device beforehand - timesteps_tensor = self.scheduler.timesteps.to(self.device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
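The block above is the heart of the seed-resize trick: a fixed 64x64 reference noise grid is drawn from the seed, and its overlap with the target-size latent grid is copied across so the same seed yields a recognizably similar image at other resolutions. The standalone sketch below (dummy tensors only, no model weights) mirrors that arithmetic to make the offset/overlap bookkeeping easier to follow.

```py
import torch

def align_reference_noise(latents, latents_reference):
    """Copy the overlapping center region of the reference noise into `latents`.

    Mirrors the seed-resize logic above: dx/dy locate the offset between the two
    grids, w/h are the overlap size, tx/ty the paste position in the target grid.
    """
    _, _, h_t, w_t = latents.shape
    _, _, h_r, w_r = latents_reference.shape
    dx, dy = (w_t - w_r) // 2, (h_t - h_r) // 2
    w = w_r if dx >= 0 else w_r + 2 * dx
    h = h_r if dy >= 0 else h_r + 2 * dy
    tx, ty = max(dx, 0), max(dy, 0)
    dx, dy = max(-dx, 0), max(-dy, 0)
    latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
    return latents

g = torch.Generator().manual_seed(0)
reference = torch.randn((1, 4, 64, 64), generator=g)   # 512x512 reference grid
target = torch.randn((1, 4, 96, 64), generator=g)      # e.g. a 768x512 request
target = align_reference_noise(target, reference)      # center rows 16:80 now match the reference
```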
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - for i, t in enumerate(self.progress_bar(timesteps_tensor)): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - - image = (image / 2 + 0.5).clamp(0, 1) - - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to( - self.device - ) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype) - ) - else: - has_nsfw_concept = None - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py deleted file mode 100644 index 52ab02c221e91c655df9c1698afc49cdb5bdb91a..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py +++ /dev/null @@ -1,121 +0,0 @@ -import argparse -import sys - -import tensorrt as trt - - -def convert_models(onnx_path: str, num_controlnet: int, output_path: str, fp16: bool = False, sd_xl: bool = False): - """ - Function to convert models in stable diffusion controlnet pipeline into TensorRT format - - Example: - python convert_stable_diffusion_controlnet_to_tensorrt.py - --onnx_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.onnx - --output_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.engine - --fp16 - --num_controlnet 2 - - Example for SD XL: - python convert_stable_diffusion_controlnet_to_tensorrt.py - --onnx_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.onnx - --output_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine - --fp16 - --num_controlnet 1 - --sd_xl - - Returns: - unet/model.engine - - run test script in diffusers/examples/community - python test_onnx_controlnet.py - --sd_model danbrown/RevAnimated-v1-2-2 - --onnx_model_dir 
path-to-models-stable_diffusion/RevAnimated-v1-2-2 - --unet_engine_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine - --qr_img_path path-to-qr-code-image - """ - # UNET - if sd_xl: - batch_size = 1 - unet_in_channels = 4 - unet_sample_size = 64 - num_tokens = 77 - text_hidden_size = 2048 - img_size = 512 - - text_embeds_shape = (2 * batch_size, 1280) - time_ids_shape = (2 * batch_size, 6) - else: - batch_size = 1 - unet_in_channels = 4 - unet_sample_size = 64 - num_tokens = 77 - text_hidden_size = 768 - img_size = 512 - batch_size = 1 - - latents_shape = (2 * batch_size, unet_in_channels, unet_sample_size, unet_sample_size) - embed_shape = (2 * batch_size, num_tokens, text_hidden_size) - controlnet_conds_shape = (num_controlnet, 2 * batch_size, 3, img_size, img_size) - - TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) - TRT_BUILDER = trt.Builder(TRT_LOGGER) - TRT_RUNTIME = trt.Runtime(TRT_LOGGER) - - network = TRT_BUILDER.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - onnx_parser = trt.OnnxParser(network, TRT_LOGGER) - - parse_success = onnx_parser.parse_from_file(onnx_path) - for idx in range(onnx_parser.num_errors): - print(onnx_parser.get_error(idx)) - if not parse_success: - sys.exit("ONNX model parsing failed") - print("Load Onnx model done") - - profile = TRT_BUILDER.create_optimization_profile() - - profile.set_shape("sample", latents_shape, latents_shape, latents_shape) - profile.set_shape("encoder_hidden_states", embed_shape, embed_shape, embed_shape) - profile.set_shape("controlnet_conds", controlnet_conds_shape, controlnet_conds_shape, controlnet_conds_shape) - if sd_xl: - profile.set_shape("text_embeds", text_embeds_shape, text_embeds_shape, text_embeds_shape) - profile.set_shape("time_ids", time_ids_shape, time_ids_shape, time_ids_shape) - - config = TRT_BUILDER.create_builder_config() - config.add_optimization_profile(profile) - config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True) - if fp16: - config.set_flag(trt.BuilderFlag.FP16) - - plan = TRT_BUILDER.build_serialized_network(network, config) - if plan is None: - sys.exit("Failed building engine") - print("Succeeded building engine") - - engine = TRT_RUNTIME.deserialize_cuda_engine(plan) - - ## save TRT engine - with open(output_path, "wb") as f: - f.write(engine.serialize()) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument("--sd_xl", action="store_true", default=False, help="SD XL pipeline") - - parser.add_argument( - "--onnx_path", - type=str, - required=True, - help="Path to the onnx checkpoint to convert", - ) - - parser.add_argument("--num_controlnet", type=int) - - parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") - - parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") - - args = parser.parse_args() - - convert_models(args.onnx_path, args.num_controlnet, args.output_path, args.fp16, args.sd_xl) diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/controlnet/pipeline_controlnet.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/controlnet/pipeline_controlnet.py deleted file mode 100644 index b899240b0c0e6333934d741c43b7f800d5a03ebd..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/controlnet/pipeline_controlnet.py +++ /dev/null @@ 
-1,1034 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import inspect -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import numpy as np -import PIL.Image -import torch -import torch.nn.functional as F -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer - -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - deprecate, - logging, - replace_example_docstring, -) -from ...utils.torch_utils import is_compiled_module, randn_tensor -from ..pipeline_utils import DiffusionPipeline -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from .multicontrolnet import MultiControlNetModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> # !pip install opencv-python transformers accelerate - >>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler - >>> from diffusers.utils import load_image - >>> import numpy as np - >>> import torch - - >>> import cv2 - >>> from PIL import Image - - >>> # download an image - >>> image = load_image( - ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" - ... ) - >>> image = np.array(image) - - >>> # get canny image - >>> image = cv2.Canny(image, 100, 200) - >>> image = image[:, :, None] - >>> image = np.concatenate([image, image, image], axis=2) - >>> canny_image = Image.fromarray(image) - - >>> # load control net and stable diffusion v1-5 - >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) - >>> pipe = StableDiffusionControlNetPipeline.from_pretrained( - ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 - ... ) - - >>> # speed up diffusion process with faster scheduler and memory optimization - >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - >>> # remove following line if xformers is not installed - >>> pipe.enable_xformers_memory_efficient_attention() - - >>> pipe.enable_model_cpu_offload() - - >>> # generate image - >>> generator = torch.manual_seed(0) - >>> image = pipe( - ... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image - ... ).images[0] - ``` -""" - - -class StableDiffusionControlNetPipeline( - DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin -): - r""" - Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. 
- - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - The pipeline also inherits the following loading methods: - - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. - text_encoder ([`~transformers.CLIPTextModel`]): - Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). - tokenizer ([`~transformers.CLIPTokenizer`]): - A `CLIPTokenizer` to tokenize text. - unet ([`UNet2DConditionModel`]): - A `UNet2DConditionModel` to denoise the encoded image latents. - controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): - Provides additional conditioning to the `unet` during the denoising process. If you set multiple - ControlNets as a list, the outputs from each ControlNet are added together to create one combined - additional conditioning. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details - about a model's potential harms. - feature_extractor ([`~transformers.CLIPImageProcessor`]): - A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. - """ - model_cpu_offload_seq = "text_encoder->unet->vae" - _optional_components = ["safety_checker", "feature_extractor"] - _exclude_from_cpu_offload = ["safety_checker"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], - scheduler: KarrasDiffusionSchedulers, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
- ) - - if isinstance(controlnet, (list, tuple)): - controlnet = MultiControlNetModel(controlnet) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - controlnet=controlnet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) - self.control_image_processor = VaeImageProcessor( - vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False - ) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to - compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling - def enable_vae_tiling(self): - r""" - Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to - compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow - processing larger images. - """ - self.vae.enable_tiling() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling - def disable_vae_tiling(self): - r""" - Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_tiling() - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - lora_scale: Optional[float] = None, - **kwargs, - ): - deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
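The `vae_scale_factor` computed above (2 raised to the number of VAE downsampling stages) is what later converts pixel sizes into latent sizes in `prepare_latents`. A small sketch of that relationship, under the usual SD 1.x assumption of four `block_out_channels` entries:

```py
# Assumed SD 1.x VAE config: four block_out_channels entries -> three downsamples.
block_out_channels = (128, 256, 512, 512)
vae_scale_factor = 2 ** (len(block_out_channels) - 1)   # 8

height, width = 512, 768
latent_shape = (1, 4, height // vae_scale_factor, width // vae_scale_factor)
print(vae_scale_factor, latent_shape)                   # 8 (1, 4, 64, 96)
```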
- deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) - - prompt_embeds_tuple = self.encode_prompt( - prompt=prompt, - device=device, - num_images_per_prompt=num_images_per_prompt, - do_classifier_free_guidance=do_classifier_free_guidance, - negative_prompt=negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - lora_scale=lora_scale, - **kwargs, - ) - - # concatenate for backwards comp - prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) - - return prompt_embeds - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt - def encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - lora_scale: Optional[float] = None, - clip_skip: Optional[int] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - lora_scale (`float`, *optional*): - A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. - clip_skip (`int`, *optional*): - Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that - the output of the pre-final layer will be used for computing the prompt embeddings. 
- """ - # set lora scale so that monkey patched LoRA - # function of text encoder can correctly access it - if lora_scale is not None and isinstance(self, LoraLoaderMixin): - self._lora_scale = lora_scale - - # dynamically adjust the LoRA scale - adjust_lora_scale_text_encoder(self.text_encoder, lora_scale, self.use_peft_backend) - - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - if clip_skip is None: - prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) - prompt_embeds = prompt_embeds[0] - else: - prompt_embeds = self.text_encoder( - text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True - ) - # Access the `hidden_states` first, that contains a tuple of - # all the hidden states from the encoder layers. Then index into - # the tuple to access the hidden states from the desired layer. - prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] - # We also need to apply the final LayerNorm here to not mess with the - # representations. The `last_hidden_states` that we typically use for - # obtaining the final prompt representations passes through the LayerNorm - # layer. - prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) - - if self.text_encoder is not None: - prompt_embeds_dtype = self.text_encoder.dtype - elif self.unet is not None: - prompt_embeds_dtype = self.unet.dtype - else: - prompt_embeds_dtype = prompt_embeds.dtype - - prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif prompt is not None and type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." 
- ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - return prompt_embeds, negative_prompt_embeds - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is None: - has_nsfw_concept = None - else: - if torch.is_tensor(image): - feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") - else: - feature_extractor_input = self.image_processor.numpy_to_pil(image) - safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - return image, has_nsfw_concept - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" - deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) - - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents, return_dict=False)[0] - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
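The `prepare_extra_step_kwargs` helper above relies on a small introspection pattern: only forward `eta` and `generator` to schedulers whose `step()` actually accepts them. A standalone sketch of that pattern, using hypothetical step functions purely for illustration:

```py
import inspect

def accepts_kwarg(fn, name: str) -> bool:
    """Return True if callable `fn` has a parameter called `name`."""
    return name in inspect.signature(fn).parameters

# Hypothetical scheduler-like step functions for illustration only.
def ddim_like_step(model_output, timestep, sample, eta=0.0, generator=None): ...
def pndm_like_step(model_output, timestep, sample): ...

extra_step_kwargs = {}
if accepts_kwarg(ddim_like_step, "eta"):
    extra_step_kwargs["eta"] = 0.0      # forwarded only when the scheduler supports it
print(accepts_kwarg(ddim_like_step, "eta"), accepts_kwarg(pndm_like_step, "eta"))  # True False
```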
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs( - self, - prompt, - image, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - controlnet_conditioning_scale=1.0, - control_guidance_start=0.0, - control_guidance_end=1.0, - ): - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - # `prompt` needs more sophisticated handling when there are multiple - # conditionings. - if isinstance(self.controlnet, MultiControlNetModel): - if isinstance(prompt, list): - logger.warning( - f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" - " prompts. The conditionings will be fixed across the prompts." - ) - - # Check `image` - is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( - self.controlnet, torch._dynamo.eval_frame.OptimizedModule - ) - if ( - isinstance(self.controlnet, ControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, ControlNetModel) - ): - self.check_image(image, prompt, prompt_embeds) - elif ( - isinstance(self.controlnet, MultiControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, MultiControlNetModel) - ): - if not isinstance(image, list): - raise TypeError("For multiple controlnets: `image` must be type `list`") - - # When `image` is a nested list: - # (e.g. 
[[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) - elif any(isinstance(i, list) for i in image): - raise ValueError("A single batch of multiple conditionings are supported at the moment.") - elif len(image) != len(self.controlnet.nets): - raise ValueError( - f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." - ) - - for image_ in image: - self.check_image(image_, prompt, prompt_embeds) - else: - assert False - - # Check `controlnet_conditioning_scale` - if ( - isinstance(self.controlnet, ControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, ControlNetModel) - ): - if not isinstance(controlnet_conditioning_scale, float): - raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") - elif ( - isinstance(self.controlnet, MultiControlNetModel) - or is_compiled - and isinstance(self.controlnet._orig_mod, MultiControlNetModel) - ): - if isinstance(controlnet_conditioning_scale, list): - if any(isinstance(i, list) for i in controlnet_conditioning_scale): - raise ValueError("A single batch of multiple conditionings are supported at the moment.") - elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( - self.controlnet.nets - ): - raise ValueError( - "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" - " the same length as the number of controlnets" - ) - else: - assert False - - if not isinstance(control_guidance_start, (tuple, list)): - control_guidance_start = [control_guidance_start] - - if not isinstance(control_guidance_end, (tuple, list)): - control_guidance_end = [control_guidance_end] - - if len(control_guidance_start) != len(control_guidance_end): - raise ValueError( - f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." - ) - - if isinstance(self.controlnet, MultiControlNetModel): - if len(control_guidance_start) != len(self.controlnet.nets): - raise ValueError( - f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." - ) - - for start, end in zip(control_guidance_start, control_guidance_end): - if start >= end: - raise ValueError( - f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
- ) - if start < 0.0: - raise ValueError(f"control guidance start: {start} can't be smaller than 0.") - if end > 1.0: - raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") - - def check_image(self, image, prompt, prompt_embeds): - image_is_pil = isinstance(image, PIL.Image.Image) - image_is_tensor = isinstance(image, torch.Tensor) - image_is_np = isinstance(image, np.ndarray) - image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) - image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) - image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) - - if ( - not image_is_pil - and not image_is_tensor - and not image_is_np - and not image_is_pil_list - and not image_is_tensor_list - and not image_is_np_list - ): - raise TypeError( - f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" - ) - - if image_is_pil: - image_batch_size = 1 - else: - image_batch_size = len(image) - - if prompt is not None and isinstance(prompt, str): - prompt_batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - prompt_batch_size = len(prompt) - elif prompt_embeds is not None: - prompt_batch_size = prompt_embeds.shape[0] - - if image_batch_size != 1 and image_batch_size != prompt_batch_size: - raise ValueError( - f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" - ) - - def prepare_image( - self, - image, - width, - height, - batch_size, - num_images_per_prompt, - device, - dtype, - do_classifier_free_guidance=False, - guess_mode=False, - ): - image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) - image_batch_size = image.shape[0] - - if image_batch_size == 1: - repeat_by = batch_size - else: - # image batch size is the same as prompt batch size - repeat_by = num_images_per_prompt - - image = image.repeat_interleave(repeat_by, dim=0) - - image = image.to(device=device, dtype=dtype) - - if do_classifier_free_guidance and not guess_mode: - image = torch.cat([image] * 2) - - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - image: PipelineImageInput = None, - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - controlnet_conditioning_scale: Union[float, List[float]] = 1.0, - guess_mode: bool = False, - control_guidance_start: Union[float, List[float]] = 0.0, - control_guidance_end: Union[float, List[float]] = 1.0, - clip_skip: Optional[int] = None, - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. - image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: - `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): - The ControlNet input condition to provide guidance to the `unet` for generation. If the type is - specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be - accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height - and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in - `init`, images must be passed as a list such that each element of the list can be correctly batched for - input to a single ControlNet. - height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. 
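`prepare_latents` accepts either a single `torch.Generator` or a list whose length must match the effective batch size, which makes each image in a batch individually reproducible. A hedged sketch of that usage; `pipe` and `canny_image` are assumed to exist as in the example docstring above.

```py
import torch

seeds = [0, 1, 2, 3]
# One generator per image: the list length must equal batch_size * num_images_per_prompt.
generators = [torch.Generator(device="cuda").manual_seed(s) for s in seeds]

images = pipe(
    ["futuristic-looking woman"] * len(seeds),   # batch of four prompts
    image=canny_image,                           # a single condition image is repeated per prompt
    generator=generators,
    num_inference_steps=20,
).images
```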
- eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not - provided, text embeddings are generated from the `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If - not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in - [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). - controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): - The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added - to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set - the corresponding scale as a list. - guess_mode (`bool`, *optional*, defaults to `False`): - The ControlNet encoder tries to recognize the content of the input image even if you remove all - prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. - control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): - The percentage of total steps at which the ControlNet starts applying. - control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): - The percentage of total steps at which the ControlNet stops applying. - clip_skip (`int`, *optional*): - Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that - the output of the pre-final layer will be used for computing the prompt embeddings. 
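When several ControlNets are combined, `image`, `controlnet_conditioning_scale`, and the `control_guidance_start`/`control_guidance_end` windows can all be given as lists with one entry per ControlNet. A hedged sketch of that multi-ControlNet call; the conditioning images `canny_image` and `pose_image` are assumed to be prepared beforehand and are not defined here.

```py
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16),
]
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnets, torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "futuristic-looking woman",
    image=[canny_image, pose_image],              # one condition image per ControlNet
    controlnet_conditioning_scale=[1.0, 0.8],     # per-net residual scaling
    control_guidance_start=[0.0, 0.0],            # both nets start immediately
    control_guidance_end=[1.0, 0.5],              # pose guidance stops halfway through
    num_inference_steps=20,
).images[0]
```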
- - Examples: - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, - otherwise a `tuple` is returned where the first element is a list with the generated images and the - second element is a list of `bool`s indicating whether the corresponding generated image contains - "not-safe-for-work" (nsfw) content. - """ - controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet - - # align format for control guidance - if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): - control_guidance_start = len(control_guidance_end) * [control_guidance_start] - elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): - control_guidance_end = len(control_guidance_start) * [control_guidance_end] - elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): - mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 - control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ - control_guidance_end - ] - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, - image, - callback_steps, - negative_prompt, - prompt_embeds, - negative_prompt_embeds, - controlnet_conditioning_scale, - control_guidance_start, - control_guidance_end, - ) - - # 2. Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): - controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) - - global_pool_conditions = ( - controlnet.config.global_pool_conditions - if isinstance(controlnet, ControlNetModel) - else controlnet.nets[0].config.global_pool_conditions - ) - guess_mode = guess_mode or global_pool_conditions - - # 3. Encode input prompt - text_encoder_lora_scale = ( - cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None - ) - prompt_embeds, negative_prompt_embeds = self.encode_prompt( - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - lora_scale=text_encoder_lora_scale, - clip_skip=clip_skip, - ) - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - if do_classifier_free_guidance: - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - # 4. 
Prepare image - if isinstance(controlnet, ControlNetModel): - image = self.prepare_image( - image=image, - width=width, - height=height, - batch_size=batch_size * num_images_per_prompt, - num_images_per_prompt=num_images_per_prompt, - device=device, - dtype=controlnet.dtype, - do_classifier_free_guidance=do_classifier_free_guidance, - guess_mode=guess_mode, - ) - height, width = image.shape[-2:] - elif isinstance(controlnet, MultiControlNetModel): - images = [] - - for image_ in image: - image_ = self.prepare_image( - image=image_, - width=width, - height=height, - batch_size=batch_size * num_images_per_prompt, - num_images_per_prompt=num_images_per_prompt, - device=device, - dtype=controlnet.dtype, - do_classifier_free_guidance=do_classifier_free_guidance, - guess_mode=guess_mode, - ) - - images.append(image_) - - image = images - height, width = image[0].shape[-2:] - else: - assert False - - # 5. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 6. Prepare latent variables - num_channels_latents = self.unet.config.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - prompt_embeds.dtype, - device, - generator, - latents, - ) - - # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7.1 Create tensor stating which controlnets to keep - controlnet_keep = [] - for i in range(len(timesteps)): - keeps = [ - 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) - for s, e in zip(control_guidance_start, control_guidance_end) - ] - controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) - - # 8. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # controlnet(s) inference - if guess_mode and do_classifier_free_guidance: - # Infer ControlNet only for the conditional batch. - control_model_input = latents - control_model_input = self.scheduler.scale_model_input(control_model_input, t) - controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] - else: - control_model_input = latent_model_input - controlnet_prompt_embeds = prompt_embeds - - if isinstance(controlnet_keep[i], list): - cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] - else: - controlnet_cond_scale = controlnet_conditioning_scale - if isinstance(controlnet_cond_scale, list): - controlnet_cond_scale = controlnet_cond_scale[0] - cond_scale = controlnet_cond_scale * controlnet_keep[i] - - down_block_res_samples, mid_block_res_sample = self.controlnet( - control_model_input, - t, - encoder_hidden_states=controlnet_prompt_embeds, - controlnet_cond=image, - conditioning_scale=cond_scale, - guess_mode=guess_mode, - return_dict=False, - ) - - if guess_mode and do_classifier_free_guidance: - # Infered ControlNet only for the conditional batch. - # To apply the output of ControlNet to both the unconditional and conditional batches, - # add 0 to the unconditional batch to keep it unchanged. 
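Step 7.1 above turns the `control_guidance_start`/`control_guidance_end` fractions into a per-timestep keep mask: the ControlNet residuals are multiplied by 1.0 inside the window and by 0.0 outside it. A tiny standalone sketch of that schedule for the single-ControlNet case (the numbers are illustrative):

```py
# Illustrative values: 10 denoising steps, ControlNet active only for the first half.
num_steps = 10
control_guidance_start, control_guidance_end = 0.0, 0.5

controlnet_keep = [
    1.0 - float(i / num_steps < control_guidance_start or (i + 1) / num_steps > control_guidance_end)
    for i in range(num_steps)
]
print(controlnet_keep)  # [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
```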
- down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] - mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - down_block_additional_residuals=down_block_res_samples, - mid_block_additional_residual=mid_block_res_sample, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # If we do sequential model offloading, let's offload unet and controlnet - # manually for max memory savings - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.unet.to("cpu") - self.controlnet.to("cpu") - torch.cuda.empty_cache() - - if not output_type == "latent": - image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - else: - image = latents - has_nsfw_concept = None - - if has_nsfw_concept is None: - do_denormalize = [True] * image.shape[0] - else: - do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] - - image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) - - # Offload all models - self.maybe_free_model_hooks() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/peteralexandercharles/runwayml-stable-diffusion-v1-5/README.md b/spaces/peteralexandercharles/runwayml-stable-diffusion-v1-5/README.md deleted file mode 100644 index 827b4d8706e8434eaa4b199addd4d7b0bddc32c5..0000000000000000000000000000000000000000 --- a/spaces/peteralexandercharles/runwayml-stable-diffusion-v1-5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Runwayml Stable Diffusion V1 5 -emoji: 🐢 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pikto/Elite-freegpt-webui/client/css/select.css b/spaces/pikto/Elite-freegpt-webui/client/css/select.css deleted file mode 100644 index 0d11898b9ffd64b6c07fc74d45fb1cfde3c43888..0000000000000000000000000000000000000000 --- a/spaces/pikto/Elite-freegpt-webui/client/css/select.css +++ /dev/null @@ -1,20 +0,0 @@ -select { - -webkit-border-radius: 8px; - -moz-border-radius: 8px; - border-radius: 8px; - - -webkit-backdrop-filter: blur(20px); - backdrop-filter: blur(20px); - - cursor: pointer; - background-color: var(--blur-bg); - border: 1px solid var(--blur-border); - color: var(--colour-3); - display: block; - position: relative; - overflow: hidden; - outline: none; - padding: 8px 16px; - - appearance: none; -} diff --git 
a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/locations/_distutils.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/locations/_distutils.py deleted file mode 100644 index 92bd93179c5cd3cb377c8b9f1e9d22d13fd7d003..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/locations/_distutils.py +++ /dev/null @@ -1,173 +0,0 @@ -"""Locations where we look for configs, install stuff, etc""" - -# The following comment should be removed at some point in the future. -# mypy: strict-optional=False - -# If pip's going to use distutils, it should not be using the copy that setuptools -# might have injected into the environment. This is done by removing the injected -# shim, if it's injected. -# -# See https://github.com/pypa/pip/issues/8761 for the original discussion and -# rationale for why this is done within pip. -try: - __import__("_distutils_hack").remove_shim() -except (ImportError, AttributeError): - pass - -import logging -import os -import sys -from distutils.cmd import Command as DistutilsCommand -from distutils.command.install import SCHEME_KEYS -from distutils.command.install import install as distutils_install_command -from distutils.sysconfig import get_python_lib -from typing import Dict, List, Optional, Union, cast - -from pip._internal.models.scheme import Scheme -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.virtualenv import running_under_virtualenv - -from .base import get_major_minor_version - -logger = logging.getLogger(__name__) - - -def distutils_scheme( - dist_name: str, - user: bool = False, - home: Optional[str] = None, - root: Optional[str] = None, - isolated: bool = False, - prefix: Optional[str] = None, - *, - ignore_config_files: bool = False, -) -> Dict[str, str]: - """ - Return a distutils install scheme - """ - from distutils.dist import Distribution - - dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name} - if isolated: - dist_args["script_args"] = ["--no-user-cfg"] - - d = Distribution(dist_args) - if not ignore_config_files: - try: - d.parse_config_files() - except UnicodeDecodeError: - # Typeshed does not include find_config_files() for some reason. - paths = d.find_config_files() # type: ignore - logger.warning( - "Ignore distutils configs in %s due to encoding errors.", - ", ".join(os.path.basename(p) for p in paths), - ) - obj: Optional[DistutilsCommand] = None - obj = d.get_command_obj("install", create=True) - assert obj is not None - i = cast(distutils_install_command, obj) - # NOTE: setting user or home has the side-effect of creating the home dir - # or user base for installations during finalize_options() - # ideally, we'd prefer a scheme class that has no side-effects. - assert not (user and prefix), f"user={user} prefix={prefix}" - assert not (home and prefix), f"home={home} prefix={prefix}" - i.user = user or i.user - if user or home: - i.prefix = "" - i.prefix = prefix or i.prefix - i.home = home or i.home - i.root = root or i.root - i.finalize_options() - - scheme = {} - for key in SCHEME_KEYS: - scheme[key] = getattr(i, "install_" + key) - - # install_lib specified in setup.cfg should install *everything* - # into there (i.e. it takes precedence over both purelib and - # platlib). 
Note, i.install_lib is *always* set after - # finalize_options(); we only want to override here if the user - # has explicitly requested it hence going back to the config - if "install_lib" in d.get_option_dict("install"): - scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) - - if running_under_virtualenv(): - if home: - prefix = home - elif user: - prefix = i.install_userbase - else: - prefix = i.prefix - scheme["headers"] = os.path.join( - prefix, - "include", - "site", - f"python{get_major_minor_version()}", - dist_name, - ) - - if root is not None: - path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1] - scheme["headers"] = os.path.join(root, path_no_drive[1:]) - - return scheme - - -def get_scheme( - dist_name: str, - user: bool = False, - home: Optional[str] = None, - root: Optional[str] = None, - isolated: bool = False, - prefix: Optional[str] = None, -) -> Scheme: - """ - Get the "scheme" corresponding to the input parameters. The distutils - documentation provides the context for the available schemes: - https://docs.python.org/3/install/index.html#alternate-installation - - :param dist_name: the name of the package to retrieve the scheme for, used - in the headers scheme path - :param user: indicates to use the "user" scheme - :param home: indicates to use the "home" scheme and provides the base - directory for the same - :param root: root under which other directories are re-based - :param isolated: equivalent to --no-user-cfg, i.e. do not consider - ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for - scheme paths - :param prefix: indicates to use the "prefix" scheme and provides the - base directory for the same - """ - scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix) - return Scheme( - platlib=scheme["platlib"], - purelib=scheme["purelib"], - headers=scheme["headers"], - scripts=scheme["scripts"], - data=scheme["data"], - ) - - -def get_bin_prefix() -> str: - # XXX: In old virtualenv versions, sys.prefix can contain '..' components, - # so we need to call normpath to eliminate them. - prefix = os.path.normpath(sys.prefix) - if WINDOWS: - bin_py = os.path.join(prefix, "Scripts") - # buildout uses 'bin' on Windows too? 
- if not os.path.exists(bin_py): - bin_py = os.path.join(prefix, "bin") - return bin_py - # Forcing to use /usr/local/bin for standard macOS framework installs - # Also log to ~/Library/Logs/ for use with the Console.app log viewer - if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/": - return "/usr/local/bin" - return os.path.join(prefix, "bin") - - -def get_purelib() -> str: - return get_python_lib(plat_specific=False) - - -def get_platlib() -> str: - return get_python_lib(plat_specific=True) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/jaraco/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/jaraco/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/__init__.py deleted file mode 100644 index 5acd7687d642f06de84b38f5842c41ae14d5f24a..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from distutils.command.bdist import bdist -import sys - -if 'egg' not in bdist.format_commands: - try: - bdist.format_commands['egg'] = ('bdist_egg', "Python .egg file") - except TypeError: - # For backward compatibility with older distutils (stdlib) - bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") - bdist.format_commands.append('egg') - -del bdist, sys diff --git a/spaces/portal/Control-Nets/index.html b/spaces/portal/Control-Nets/index.html deleted file mode 100644 index a9ce7a178a3240c18308d40cd06f4c672c697209..0000000000000000000000000000000000000000 --- a/spaces/portal/Control-Nets/index.html +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/spaces/pragnakalp/Audio_Emotion_Recognition/ser_detection.py b/spaces/pragnakalp/Audio_Emotion_Recognition/ser_detection.py deleted file mode 100644 index d124a2c1326e3f9f724edaa3fb8f8db13049fa90..0000000000000000000000000000000000000000 --- a/spaces/pragnakalp/Audio_Emotion_Recognition/ser_detection.py +++ /dev/null @@ -1,149 +0,0 @@ -from __future__ import absolute_import, division, print_function, unicode_literals - -from flask import Flask, make_response, render_template, request, jsonify, redirect, url_for, send_from_directory -from flask_cors import CORS - -import sys -import os - -import librosa -import librosa.display -import numpy as np - -import warnings -import tensorflow as tf -from keras.models import Sequential -from keras.layers import Dense -from keras.utils import to_categorical -from keras.layers import Flatten, Dropout, Activation -from keras.layers import Conv2D, MaxPooling2D -from keras.layers.normalization import BatchNormalization -from sklearn.model_selection import train_test_split -from tqdm import tqdm -# import scipy.io.wavfile as wav -# from speechpy.feature import mfcc - -import pyaudio -import wave - -warnings.filterwarnings("ignore") - -app = Flask(__name__) -CORS(app) - -classLabels = ('Angry', 'Fear', 'Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral') -numLabels = len(classLabels) -in_shape = (39,216) -model = Sequential() - -model.add(Conv2D(8, (13, 13), input_shape=(in_shape[0], in_shape[1], 1))) -model.add(BatchNormalization(axis=-1)) -model.add(Activation('relu')) 
-model.add(Conv2D(8, (13, 13))) -model.add(BatchNormalization(axis=-1)) -model.add(Activation('relu')) -model.add(MaxPooling2D(pool_size=(2, 1))) -model.add(Conv2D(8, (3, 3))) -model.add(BatchNormalization(axis=-1)) -model.add(Activation('relu')) -model.add(Conv2D(8, (1, 1))) -model.add(BatchNormalization(axis=-1)) -model.add(Activation('relu')) -model.add(MaxPooling2D(pool_size=(2, 1))) -model.add(Flatten()) -model.add(Dense(64)) -model.add(BatchNormalization()) -model.add(Activation('relu')) -model.add(Dropout(0.2)) - -model.add(Dense(numLabels, activation='softmax')) -model.compile(loss='binary_crossentropy', optimizer='adam', - metrics=['accuracy']) -# print(model.summary(), file=sys.stderr) - -model.load_weights('speech_emotion_detection_ravdess_savee.h5') - -def detect_emotion(file_name): - X, sample_rate = librosa.load(file_name, res_type='kaiser_best',duration=2.5,sr=22050*2,offset=0.5) - sample_rate = np.array(sample_rate) - mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=39) - feature = mfccs - print("Feature_shape =>",feature.shape) - feature = feature.reshape(39, 216, 1) - result = classLabels[np.argmax(model.predict(np.array([feature])))] - print("Result ==> ",result) - return result - -@app.route("/speech-emotion-recognition/") -def emotion_detection(): - filename = 'audio_files/Happy.wav' - - result = detect_emotion(filename) - return result - -@app.route("/record_audio/") -def record_audio(): - CHUNK = 1024 - FORMAT = pyaudio.paInt16 #paInt8 - CHANNELS = 2 - RATE = 44100 #sample rate - RECORD_SECONDS = 4 - - fileList = os.listdir('recorded_audio') - print("Audio File List ==> ",fileList) - - new_wav_file = "" - - if(fileList): - filename_list = [] - for i in fileList: - print(i) - filename = i.split('.')[0] - filename_list.append(filename) - - max_file = max(filename_list) - print(type(max_file)) - - new_wav_file = int(max_file) + 1 - else: - new_wav_file="1" - - new_wav_file = str(new_wav_file) + ".wav" - filepath = os.path.join('recorded_audio', new_wav_file) - WAVE_OUTPUT_FILENAME = filepath - - print(WAVE_OUTPUT_FILENAME) - - p = pyaudio.PyAudio() - - stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) #buffer - - print("* recording") - - frames = [] - - for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): - data = stream.read(CHUNK) - frames.append(data) # 2 bytes(16 bits) per channel - - print("* done recording") - - stream.stop_stream() - stream.close() - p.terminate() - - wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') - wf.setnchannels(CHANNELS) - wf.setsampwidth(p.get_sample_size(FORMAT)) - wf.setframerate(RATE) - wf.writeframes(b''.join(frames)) - wf.close() - return "Audio Recorded" - -if __name__ == "__main__": - app.run() - diff --git a/spaces/productizationlabs/IBCFProductRecommendations/README.md b/spaces/productizationlabs/IBCFProductRecommendations/README.md deleted file mode 100644 index d7ac743287acec8c13ba9c2b2149c20a8fbf3664..0000000000000000000000000000000000000000 --- a/spaces/productizationlabs/IBCFProductRecommendations/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: IBCFProductRecommendations -emoji: ⚡ -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.22.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/voltToFea.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/voltToFea.py deleted file mode 100644 index 2265d5029533706e59d61d4626217d32b5066acc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/voltToFea.py +++ /dev/null @@ -1,726 +0,0 @@ -"""\ -MS VOLT ``.vtp`` to AFDKO ``.fea`` OpenType Layout converter. - -Usage ------ - -To convert a VTP project file: - - - $ fonttools voltLib.voltToFea input.vtp output.fea - -It is also possible convert font files with `TSIV` table (as saved from Volt), -in this case the glyph names used in the Volt project will be mapped to the -actual glyph names in the font files when written to the feature file: - - $ fonttools voltLib.voltToFea input.ttf output.fea - -The ``--quiet`` option can be used to suppress warnings. - -The ``--traceback`` can be used to get Python traceback in case of exceptions, -instead of suppressing the traceback. - - -Limitations ------------ - -* Not all VOLT features are supported, the script will error if it it - encounters something it does not understand. Please report an issue if this - happens. -* AFDKO feature file syntax for mark positioning is awkward and does not allow - setting the mark coverage. It also defines mark anchors globally, as a result - some mark positioning lookups might cover many marks than what was in the VOLT - file. This should not be an issue in practice, but if it is then the only way - is to modify the VOLT file or the generated feature file manually to use unique - mark anchors for each lookup. -* VOLT allows subtable breaks in any lookup type, but AFDKO feature file - implementations vary in their support; currently AFDKO’s makeOTF supports - subtable breaks in pair positioning lookups only, while FontTools’ feaLib - support it for most substitution lookups and only some positioning lookups. -""" - -import logging -import re -from io import StringIO - -from fontTools.feaLib import ast -from fontTools.ttLib import TTFont, TTLibError -from fontTools.voltLib import ast as VAst -from fontTools.voltLib.parser import Parser as VoltParser - -log = logging.getLogger("fontTools.voltLib.voltToFea") - -TABLES = ["GDEF", "GSUB", "GPOS"] - - -class MarkClassDefinition(ast.MarkClassDefinition): - def asFea(self, indent=""): - res = "" - if not getattr(self, "used", False): - res += "#" - res += ast.MarkClassDefinition.asFea(self, indent) - return res - - -# For sorting voltLib.ast.GlyphDefinition, see its use below. 
-class Group: - def __init__(self, group): - self.name = group.name.lower() - self.groups = [ - x.group.lower() for x in group.enum.enum if isinstance(x, VAst.GroupName) - ] - - def __lt__(self, other): - if self.name in other.groups: - return True - if other.name in self.groups: - return False - if self.groups and not other.groups: - return False - if not self.groups and other.groups: - return True - - -class VoltToFea: - _NOT_LOOKUP_NAME_RE = re.compile(r"[^A-Za-z_0-9.]") - _NOT_CLASS_NAME_RE = re.compile(r"[^A-Za-z_0-9.\-]") - - def __init__(self, file_or_path, font=None): - self._file_or_path = file_or_path - self._font = font - - self._glyph_map = {} - self._glyph_order = None - - self._gdef = {} - self._glyphclasses = {} - self._features = {} - self._lookups = {} - - self._marks = set() - self._ligatures = {} - - self._markclasses = {} - self._anchors = {} - - self._settings = {} - - self._lookup_names = {} - self._class_names = {} - - def _lookupName(self, name): - if name not in self._lookup_names: - res = self._NOT_LOOKUP_NAME_RE.sub("_", name) - while res in self._lookup_names.values(): - res += "_" - self._lookup_names[name] = res - return self._lookup_names[name] - - def _className(self, name): - if name not in self._class_names: - res = self._NOT_CLASS_NAME_RE.sub("_", name) - while res in self._class_names.values(): - res += "_" - self._class_names[name] = res - return self._class_names[name] - - def _collectStatements(self, doc, tables): - # Collect and sort group definitions first, to make sure a group - # definition that references other groups comes after them since VOLT - # does not enforce such ordering, and feature file require it. - groups = [s for s in doc.statements if isinstance(s, VAst.GroupDefinition)] - for statement in sorted(groups, key=lambda x: Group(x)): - self._groupDefinition(statement) - - for statement in doc.statements: - if isinstance(statement, VAst.GlyphDefinition): - self._glyphDefinition(statement) - elif isinstance(statement, VAst.AnchorDefinition): - if "GPOS" in tables: - self._anchorDefinition(statement) - elif isinstance(statement, VAst.SettingDefinition): - self._settingDefinition(statement) - elif isinstance(statement, VAst.GroupDefinition): - pass # Handled above - elif isinstance(statement, VAst.ScriptDefinition): - self._scriptDefinition(statement) - elif not isinstance(statement, VAst.LookupDefinition): - raise NotImplementedError(statement) - - # Lookup definitions need to be handled last as they reference glyph - # and mark classes that might be defined after them. 
- for statement in doc.statements: - if isinstance(statement, VAst.LookupDefinition): - if statement.pos and "GPOS" not in tables: - continue - if statement.sub and "GSUB" not in tables: - continue - self._lookupDefinition(statement) - - def _buildFeatureFile(self, tables): - doc = ast.FeatureFile() - statements = doc.statements - - if self._glyphclasses: - statements.append(ast.Comment("# Glyph classes")) - statements.extend(self._glyphclasses.values()) - - if self._markclasses: - statements.append(ast.Comment("\n# Mark classes")) - statements.extend(c[1] for c in sorted(self._markclasses.items())) - - if self._lookups: - statements.append(ast.Comment("\n# Lookups")) - for lookup in self._lookups.values(): - statements.extend(getattr(lookup, "targets", [])) - statements.append(lookup) - - # Prune features - features = self._features.copy() - for ftag in features: - scripts = features[ftag] - for stag in scripts: - langs = scripts[stag] - for ltag in langs: - langs[ltag] = [l for l in langs[ltag] if l.lower() in self._lookups] - scripts[stag] = {t: l for t, l in langs.items() if l} - features[ftag] = {t: s for t, s in scripts.items() if s} - features = {t: f for t, f in features.items() if f} - - if features: - statements.append(ast.Comment("# Features")) - for ftag, scripts in features.items(): - feature = ast.FeatureBlock(ftag) - stags = sorted(scripts, key=lambda k: 0 if k == "DFLT" else 1) - for stag in stags: - feature.statements.append(ast.ScriptStatement(stag)) - ltags = sorted(scripts[stag], key=lambda k: 0 if k == "dflt" else 1) - for ltag in ltags: - include_default = True if ltag == "dflt" else False - feature.statements.append( - ast.LanguageStatement(ltag, include_default=include_default) - ) - for name in scripts[stag][ltag]: - lookup = self._lookups[name.lower()] - lookupref = ast.LookupReferenceStatement(lookup) - feature.statements.append(lookupref) - statements.append(feature) - - if self._gdef and "GDEF" in tables: - classes = [] - for name in ("BASE", "MARK", "LIGATURE", "COMPONENT"): - if name in self._gdef: - classname = "GDEF_" + name.lower() - glyphclass = ast.GlyphClassDefinition(classname, self._gdef[name]) - statements.append(glyphclass) - classes.append(ast.GlyphClassName(glyphclass)) - else: - classes.append(None) - - gdef = ast.TableBlock("GDEF") - gdef.statements.append(ast.GlyphClassDefStatement(*classes)) - statements.append(gdef) - - return doc - - def convert(self, tables=None): - doc = VoltParser(self._file_or_path).parse() - - if tables is None: - tables = TABLES - if self._font is not None: - self._glyph_order = self._font.getGlyphOrder() - - self._collectStatements(doc, tables) - fea = self._buildFeatureFile(tables) - return fea.asFea() - - def _glyphName(self, glyph): - try: - name = glyph.glyph - except AttributeError: - name = glyph - return ast.GlyphName(self._glyph_map.get(name, name)) - - def _groupName(self, group): - try: - name = group.group - except AttributeError: - name = group - return ast.GlyphClassName(self._glyphclasses[name.lower()]) - - def _coverage(self, coverage): - items = [] - for item in coverage: - if isinstance(item, VAst.GlyphName): - items.append(self._glyphName(item)) - elif isinstance(item, VAst.GroupName): - items.append(self._groupName(item)) - elif isinstance(item, VAst.Enum): - items.append(self._enum(item)) - elif isinstance(item, VAst.Range): - items.append((item.start, item.end)) - else: - raise NotImplementedError(item) - return items - - def _enum(self, enum): - return ast.GlyphClass(self._coverage(enum.enum)) - 
- def _context(self, context): - out = [] - for item in context: - coverage = self._coverage(item) - if not isinstance(coverage, (tuple, list)): - coverage = [coverage] - out.extend(coverage) - return out - - def _groupDefinition(self, group): - name = self._className(group.name) - glyphs = self._enum(group.enum) - glyphclass = ast.GlyphClassDefinition(name, glyphs) - - self._glyphclasses[group.name.lower()] = glyphclass - - def _glyphDefinition(self, glyph): - try: - self._glyph_map[glyph.name] = self._glyph_order[glyph.id] - except TypeError: - pass - - if glyph.type in ("BASE", "MARK", "LIGATURE", "COMPONENT"): - if glyph.type not in self._gdef: - self._gdef[glyph.type] = ast.GlyphClass() - self._gdef[glyph.type].glyphs.append(self._glyphName(glyph.name)) - - if glyph.type == "MARK": - self._marks.add(glyph.name) - elif glyph.type == "LIGATURE": - self._ligatures[glyph.name] = glyph.components - - def _scriptDefinition(self, script): - stag = script.tag - for lang in script.langs: - ltag = lang.tag - for feature in lang.features: - lookups = {l.split("\\")[0]: True for l in feature.lookups} - ftag = feature.tag - if ftag not in self._features: - self._features[ftag] = {} - if stag not in self._features[ftag]: - self._features[ftag][stag] = {} - assert ltag not in self._features[ftag][stag] - self._features[ftag][stag][ltag] = lookups.keys() - - def _settingDefinition(self, setting): - if setting.name.startswith("COMPILER_"): - self._settings[setting.name] = setting.value - else: - log.warning(f"Unsupported setting ignored: {setting.name}") - - def _adjustment(self, adjustment): - adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment - - adv_device = adv_adjust_by and adv_adjust_by.items() or None - dx_device = dx_adjust_by and dx_adjust_by.items() or None - dy_device = dy_adjust_by and dy_adjust_by.items() or None - - return ast.ValueRecord( - xPlacement=dx, - yPlacement=dy, - xAdvance=adv, - xPlaDevice=dx_device, - yPlaDevice=dy_device, - xAdvDevice=adv_device, - ) - - def _anchor(self, adjustment): - adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment - - assert not adv_adjust_by - dx_device = dx_adjust_by and dx_adjust_by.items() or None - dy_device = dy_adjust_by and dy_adjust_by.items() or None - - return ast.Anchor( - dx or 0, - dy or 0, - xDeviceTable=dx_device or None, - yDeviceTable=dy_device or None, - ) - - def _anchorDefinition(self, anchordef): - anchorname = anchordef.name - glyphname = anchordef.glyph_name - anchor = self._anchor(anchordef.pos) - - if anchorname.startswith("MARK_"): - name = "_".join(anchorname.split("_")[1:]) - markclass = ast.MarkClass(self._className(name)) - glyph = self._glyphName(glyphname) - markdef = MarkClassDefinition(markclass, anchor, glyph) - self._markclasses[(glyphname, anchorname)] = markdef - else: - if glyphname not in self._anchors: - self._anchors[glyphname] = {} - if anchorname not in self._anchors[glyphname]: - self._anchors[glyphname][anchorname] = {} - self._anchors[glyphname][anchorname][anchordef.component] = anchor - - def _gposLookup(self, lookup, fealookup): - statements = fealookup.statements - - pos = lookup.pos - if isinstance(pos, VAst.PositionAdjustPairDefinition): - for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items(): - coverage_1 = pos.coverages_1[idx1 - 1] - coverage_2 = pos.coverages_2[idx2 - 1] - - # If not both are groups, use “enum pos” otherwise makeotf will - # fail. 
- enumerated = False - for item in coverage_1 + coverage_2: - if not isinstance(item, VAst.GroupName): - enumerated = True - - glyphs1 = self._coverage(coverage_1) - glyphs2 = self._coverage(coverage_2) - record1 = self._adjustment(pos1) - record2 = self._adjustment(pos2) - assert len(glyphs1) == 1 - assert len(glyphs2) == 1 - statements.append( - ast.PairPosStatement( - glyphs1[0], record1, glyphs2[0], record2, enumerated=enumerated - ) - ) - elif isinstance(pos, VAst.PositionAdjustSingleDefinition): - for a, b in pos.adjust_single: - glyphs = self._coverage(a) - record = self._adjustment(b) - assert len(glyphs) == 1 - statements.append( - ast.SinglePosStatement([(glyphs[0], record)], [], [], False) - ) - elif isinstance(pos, VAst.PositionAttachDefinition): - anchors = {} - for marks, classname in pos.coverage_to: - for mark in marks: - # Set actually used mark classes. Basically a hack to get - # around the feature file syntax limitation of making mark - # classes global and not allowing mark positioning to - # specify mark coverage. - for name in mark.glyphSet(): - key = (name, "MARK_" + classname) - self._markclasses[key].used = True - markclass = ast.MarkClass(self._className(classname)) - for base in pos.coverage: - for name in base.glyphSet(): - if name not in anchors: - anchors[name] = [] - if classname not in anchors[name]: - anchors[name].append(classname) - - for name in anchors: - components = 1 - if name in self._ligatures: - components = self._ligatures[name] - - marks = [] - for mark in anchors[name]: - markclass = ast.MarkClass(self._className(mark)) - for component in range(1, components + 1): - if len(marks) < component: - marks.append([]) - anchor = None - if component in self._anchors[name][mark]: - anchor = self._anchors[name][mark][component] - marks[component - 1].append((anchor, markclass)) - - base = self._glyphName(name) - if name in self._marks: - mark = ast.MarkMarkPosStatement(base, marks[0]) - elif name in self._ligatures: - mark = ast.MarkLigPosStatement(base, marks) - else: - mark = ast.MarkBasePosStatement(base, marks[0]) - statements.append(mark) - elif isinstance(pos, VAst.PositionAttachCursiveDefinition): - # Collect enter and exit glyphs - enter_coverage = [] - for coverage in pos.coverages_enter: - for base in coverage: - for name in base.glyphSet(): - enter_coverage.append(name) - exit_coverage = [] - for coverage in pos.coverages_exit: - for base in coverage: - for name in base.glyphSet(): - exit_coverage.append(name) - - # Write enter anchors, also check if the glyph has exit anchor and - # write it, too. - for name in enter_coverage: - glyph = self._glyphName(name) - entry = self._anchors[name]["entry"][1] - exit = None - if name in exit_coverage: - exit = self._anchors[name]["exit"][1] - exit_coverage.pop(exit_coverage.index(name)) - statements.append(ast.CursivePosStatement(glyph, entry, exit)) - - # Write any remaining exit anchors. 
- for name in exit_coverage: - glyph = self._glyphName(name) - exit = self._anchors[name]["exit"][1] - statements.append(ast.CursivePosStatement(glyph, None, exit)) - else: - raise NotImplementedError(pos) - - def _gposContextLookup( - self, lookup, prefix, suffix, ignore, fealookup, targetlookup - ): - statements = fealookup.statements - - assert not lookup.reversal - - pos = lookup.pos - if isinstance(pos, VAst.PositionAdjustPairDefinition): - for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items(): - glyphs1 = self._coverage(pos.coverages_1[idx1 - 1]) - glyphs2 = self._coverage(pos.coverages_2[idx2 - 1]) - assert len(glyphs1) == 1 - assert len(glyphs2) == 1 - glyphs = (glyphs1[0], glyphs2[0]) - - if ignore: - statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)]) - else: - lookups = (targetlookup, targetlookup) - statement = ast.ChainContextPosStatement( - prefix, glyphs, suffix, lookups - ) - statements.append(statement) - elif isinstance(pos, VAst.PositionAdjustSingleDefinition): - glyphs = [ast.GlyphClass()] - for a, b in pos.adjust_single: - glyph = self._coverage(a) - glyphs[0].extend(glyph) - - if ignore: - statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)]) - else: - statement = ast.ChainContextPosStatement( - prefix, glyphs, suffix, [targetlookup] - ) - statements.append(statement) - elif isinstance(pos, VAst.PositionAttachDefinition): - glyphs = [ast.GlyphClass()] - for coverage, _ in pos.coverage_to: - glyphs[0].extend(self._coverage(coverage)) - - if ignore: - statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)]) - else: - statement = ast.ChainContextPosStatement( - prefix, glyphs, suffix, [targetlookup] - ) - statements.append(statement) - else: - raise NotImplementedError(pos) - - def _gsubLookup(self, lookup, prefix, suffix, ignore, chain, fealookup): - statements = fealookup.statements - - sub = lookup.sub - for key, val in sub.mapping.items(): - if not key or not val: - path, line, column = sub.location - log.warning(f"{path}:{line}:{column}: Ignoring empty substitution") - continue - statement = None - glyphs = self._coverage(key) - replacements = self._coverage(val) - if ignore: - chain_context = (prefix, glyphs, suffix) - statement = ast.IgnoreSubstStatement([chain_context]) - elif isinstance(sub, VAst.SubstitutionSingleDefinition): - assert len(glyphs) == 1 - assert len(replacements) == 1 - statement = ast.SingleSubstStatement( - glyphs, replacements, prefix, suffix, chain - ) - elif isinstance(sub, VAst.SubstitutionReverseChainingSingleDefinition): - assert len(glyphs) == 1 - assert len(replacements) == 1 - statement = ast.ReverseChainSingleSubstStatement( - prefix, suffix, glyphs, replacements - ) - elif isinstance(sub, VAst.SubstitutionMultipleDefinition): - assert len(glyphs) == 1 - statement = ast.MultipleSubstStatement( - prefix, glyphs[0], suffix, replacements, chain - ) - elif isinstance(sub, VAst.SubstitutionLigatureDefinition): - assert len(replacements) == 1 - statement = ast.LigatureSubstStatement( - prefix, glyphs, suffix, replacements[0], chain - ) - else: - raise NotImplementedError(sub) - statements.append(statement) - - def _lookupDefinition(self, lookup): - mark_attachement = None - mark_filtering = None - - flags = 0 - if lookup.direction == "RTL": - flags |= 1 - if not lookup.process_base: - flags |= 2 - # FIXME: Does VOLT support this? 
- # if not lookup.process_ligatures: - # flags |= 4 - if not lookup.process_marks: - flags |= 8 - elif isinstance(lookup.process_marks, str): - mark_attachement = self._groupName(lookup.process_marks) - elif lookup.mark_glyph_set is not None: - mark_filtering = self._groupName(lookup.mark_glyph_set) - - lookupflags = None - if flags or mark_attachement is not None or mark_filtering is not None: - lookupflags = ast.LookupFlagStatement( - flags, mark_attachement, mark_filtering - ) - if "\\" in lookup.name: - # Merge sub lookups as subtables (lookups named “base\sub”), - # makeotf/feaLib will issue a warning and ignore the subtable - # statement if it is not a pairpos lookup, though. - name = lookup.name.split("\\")[0] - if name.lower() not in self._lookups: - fealookup = ast.LookupBlock(self._lookupName(name)) - if lookupflags is not None: - fealookup.statements.append(lookupflags) - fealookup.statements.append(ast.Comment("# " + lookup.name)) - else: - fealookup = self._lookups[name.lower()] - fealookup.statements.append(ast.SubtableStatement()) - fealookup.statements.append(ast.Comment("# " + lookup.name)) - self._lookups[name.lower()] = fealookup - else: - fealookup = ast.LookupBlock(self._lookupName(lookup.name)) - if lookupflags is not None: - fealookup.statements.append(lookupflags) - self._lookups[lookup.name.lower()] = fealookup - - if lookup.comments is not None: - fealookup.statements.append(ast.Comment("# " + lookup.comments)) - - contexts = [] - if lookup.context: - for context in lookup.context: - prefix = self._context(context.left) - suffix = self._context(context.right) - ignore = context.ex_or_in == "EXCEPT_CONTEXT" - contexts.append([prefix, suffix, ignore, False]) - # It seems that VOLT will create contextual substitution using - # only the input if there is no other contexts in this lookup. 
- if ignore and len(lookup.context) == 1: - contexts.append([[], [], False, True]) - else: - contexts.append([[], [], False, False]) - - targetlookup = None - for prefix, suffix, ignore, chain in contexts: - if lookup.sub is not None: - self._gsubLookup(lookup, prefix, suffix, ignore, chain, fealookup) - - if lookup.pos is not None: - if self._settings.get("COMPILER_USEEXTENSIONLOOKUPS"): - fealookup.use_extension = True - if prefix or suffix or chain or ignore: - if not ignore and targetlookup is None: - targetname = self._lookupName(lookup.name + " target") - targetlookup = ast.LookupBlock(targetname) - fealookup.targets = getattr(fealookup, "targets", []) - fealookup.targets.append(targetlookup) - self._gposLookup(lookup, targetlookup) - self._gposContextLookup( - lookup, prefix, suffix, ignore, fealookup, targetlookup - ) - else: - self._gposLookup(lookup, fealookup) - - -def main(args=None): - """Convert MS VOLT to AFDKO feature files.""" - - import argparse - from pathlib import Path - - from fontTools import configLogger - - parser = argparse.ArgumentParser( - "fonttools voltLib.voltToFea", description=main.__doc__ - ) - parser.add_argument( - "input", metavar="INPUT", type=Path, help="input font/VTP file to process" - ) - parser.add_argument( - "featurefile", metavar="OUTPUT", type=Path, help="output feature file" - ) - parser.add_argument( - "-t", - "--table", - action="append", - choices=TABLES, - dest="tables", - help="List of tables to write, by default all tables are written", - ) - parser.add_argument( - "-q", "--quiet", action="store_true", help="Suppress non-error messages" - ) - parser.add_argument( - "--traceback", action="store_true", help="Don’t catch exceptions" - ) - - options = parser.parse_args(args) - - configLogger(level=("ERROR" if options.quiet else "INFO")) - - file_or_path = options.input - font = None - try: - font = TTFont(file_or_path) - if "TSIV" in font: - file_or_path = StringIO(font["TSIV"].data.decode("utf-8")) - else: - log.error('"TSIV" table is missing, font was not saved from VOLT?') - return 1 - except TTLibError: - pass - - converter = VoltToFea(file_or_path, font) - try: - fea = converter.convert(options.tables) - except NotImplementedError as e: - if options.traceback: - raise - location = getattr(e.args[0], "location", None) - message = f'"{e}" is not supported' - if location: - path, line, column = location - log.error(f"{path}:{line}:{column}: {message}") - else: - log.error(message) - return 1 - with open(options.featurefile, "w") as feafile: - feafile.write(fea) - - -if __name__ == "__main__": - import sys - - sys.exit(main()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_async/http11.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_async/http11.py deleted file mode 100644 index 32fa3a6f23f5205a13fd94dddf4b8bc274a3c463..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_async/http11.py +++ /dev/null @@ -1,343 +0,0 @@ -import enum -import logging -import time -from types import TracebackType -from typing import ( - AsyncIterable, - AsyncIterator, - List, - Optional, - Tuple, - Type, - Union, - cast, -) - -import h11 - -from .._backends.base import AsyncNetworkStream -from .._exceptions import ( - ConnectionNotAvailable, - LocalProtocolError, - RemoteProtocolError, - WriteError, - map_exceptions, -) -from .._models import Origin, Request, Response -from .._synchronization import AsyncLock, 
AsyncShieldCancellation -from .._trace import Trace -from .interfaces import AsyncConnectionInterface - -logger = logging.getLogger("httpcore.http11") - - -# A subset of `h11.Event` types supported by `_send_event` -H11SendEvent = Union[ - h11.Request, - h11.Data, - h11.EndOfMessage, -] - - -class HTTPConnectionState(enum.IntEnum): - NEW = 0 - ACTIVE = 1 - IDLE = 2 - CLOSED = 3 - - -class AsyncHTTP11Connection(AsyncConnectionInterface): - READ_NUM_BYTES = 64 * 1024 - MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024 - - def __init__( - self, - origin: Origin, - stream: AsyncNetworkStream, - keepalive_expiry: Optional[float] = None, - ) -> None: - self._origin = origin - self._network_stream = stream - self._keepalive_expiry: Optional[float] = keepalive_expiry - self._expire_at: Optional[float] = None - self._state = HTTPConnectionState.NEW - self._state_lock = AsyncLock() - self._request_count = 0 - self._h11_state = h11.Connection( - our_role=h11.CLIENT, - max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE, - ) - - async def handle_async_request(self, request: Request) -> Response: - if not self.can_handle_request(request.url.origin): - raise RuntimeError( - f"Attempted to send request to {request.url.origin} on connection " - f"to {self._origin}" - ) - - async with self._state_lock: - if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE): - self._request_count += 1 - self._state = HTTPConnectionState.ACTIVE - self._expire_at = None - else: - raise ConnectionNotAvailable() - - try: - kwargs = {"request": request} - try: - async with Trace( - "send_request_headers", logger, request, kwargs - ) as trace: - await self._send_request_headers(**kwargs) - async with Trace("send_request_body", logger, request, kwargs) as trace: - await self._send_request_body(**kwargs) - except WriteError: - # If we get a write error while we're writing the request, - # then we supress this error and move on to attempting to - # read the response. Servers can sometimes close the request - # pre-emptively and then respond with a well formed HTTP - # error response. - pass - - async with Trace( - "receive_response_headers", logger, request, kwargs - ) as trace: - ( - http_version, - status, - reason_phrase, - headers, - ) = await self._receive_response_headers(**kwargs) - trace.return_value = ( - http_version, - status, - reason_phrase, - headers, - ) - - return Response( - status=status, - headers=headers, - content=HTTP11ConnectionByteStream(self, request), - extensions={ - "http_version": http_version, - "reason_phrase": reason_phrase, - "network_stream": self._network_stream, - }, - ) - except BaseException as exc: - with AsyncShieldCancellation(): - async with Trace("response_closed", logger, request) as trace: - await self._response_closed() - raise exc - - # Sending the request... 
- - async def _send_request_headers(self, request: Request) -> None: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("write", None) - - with map_exceptions({h11.LocalProtocolError: LocalProtocolError}): - event = h11.Request( - method=request.method, - target=request.url.target, - headers=request.headers, - ) - await self._send_event(event, timeout=timeout) - - async def _send_request_body(self, request: Request) -> None: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("write", None) - - assert isinstance(request.stream, AsyncIterable) - async for chunk in request.stream: - event = h11.Data(data=chunk) - await self._send_event(event, timeout=timeout) - - await self._send_event(h11.EndOfMessage(), timeout=timeout) - - async def _send_event( - self, event: h11.Event, timeout: Optional[float] = None - ) -> None: - bytes_to_send = self._h11_state.send(event) - if bytes_to_send is not None: - await self._network_stream.write(bytes_to_send, timeout=timeout) - - # Receiving the response... - - async def _receive_response_headers( - self, request: Request - ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("read", None) - - while True: - event = await self._receive_event(timeout=timeout) - if isinstance(event, h11.Response): - break - if ( - isinstance(event, h11.InformationalResponse) - and event.status_code == 101 - ): - break - - http_version = b"HTTP/" + event.http_version - - # h11 version 0.11+ supports a `raw_items` interface to get the - # raw header casing, rather than the enforced lowercase headers. - headers = event.headers.raw_items() - - return http_version, event.status_code, event.reason, headers - - async def _receive_response_body(self, request: Request) -> AsyncIterator[bytes]: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("read", None) - - while True: - event = await self._receive_event(timeout=timeout) - if isinstance(event, h11.Data): - yield bytes(event.data) - elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)): - break - - async def _receive_event( - self, timeout: Optional[float] = None - ) -> Union[h11.Event, Type[h11.PAUSED]]: - while True: - with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}): - event = self._h11_state.next_event() - - if event is h11.NEED_DATA: - data = await self._network_stream.read( - self.READ_NUM_BYTES, timeout=timeout - ) - - # If we feed this case through h11 we'll raise an exception like: - # - # httpcore.RemoteProtocolError: can't handle event type - # ConnectionClosed when role=SERVER and state=SEND_RESPONSE - # - # Which is accurate, but not very informative from an end-user - # perspective. Instead we handle this case distinctly and treat - # it as a ConnectError. - if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE: - msg = "Server disconnected without sending a response." 
- raise RemoteProtocolError(msg) - - self._h11_state.receive_data(data) - else: - # mypy fails to narrow the type in the above if statement above - return cast(Union[h11.Event, Type[h11.PAUSED]], event) - - async def _response_closed(self) -> None: - async with self._state_lock: - if ( - self._h11_state.our_state is h11.DONE - and self._h11_state.their_state is h11.DONE - ): - self._state = HTTPConnectionState.IDLE - self._h11_state.start_next_cycle() - if self._keepalive_expiry is not None: - now = time.monotonic() - self._expire_at = now + self._keepalive_expiry - else: - await self.aclose() - - # Once the connection is no longer required... - - async def aclose(self) -> None: - # Note that this method unilaterally closes the connection, and does - # not have any kind of locking in place around it. - self._state = HTTPConnectionState.CLOSED - await self._network_stream.aclose() - - # The AsyncConnectionInterface methods provide information about the state of - # the connection, allowing for a connection pooling implementation to - # determine when to reuse and when to close the connection... - - def can_handle_request(self, origin: Origin) -> bool: - return origin == self._origin - - def is_available(self) -> bool: - # Note that HTTP/1.1 connections in the "NEW" state are not treated as - # being "available". The control flow which created the connection will - # be able to send an outgoing request, but the connection will not be - # acquired from the connection pool for any other request. - return self._state == HTTPConnectionState.IDLE - - def has_expired(self) -> bool: - now = time.monotonic() - keepalive_expired = self._expire_at is not None and now > self._expire_at - - # If the HTTP connection is idle but the socket is readable, then the - # only valid state is that the socket is about to return b"", indicating - # a server-initiated disconnect. - server_disconnected = ( - self._state == HTTPConnectionState.IDLE - and self._network_stream.get_extra_info("is_readable") - ) - - return keepalive_expired or server_disconnected - - def is_idle(self) -> bool: - return self._state == HTTPConnectionState.IDLE - - def is_closed(self) -> bool: - return self._state == HTTPConnectionState.CLOSED - - def info(self) -> str: - origin = str(self._origin) - return ( - f"{origin!r}, HTTP/1.1, {self._state.name}, " - f"Request Count: {self._request_count}" - ) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - origin = str(self._origin) - return ( - f"<{class_name} [{origin!r}, {self._state.name}, " - f"Request Count: {self._request_count}]>" - ) - - # These context managers are not used in the standard flow, but are - # useful for testing or working with connection instances directly. 
- - async def __aenter__(self) -> "AsyncHTTP11Connection": - return self - - async def __aexit__( - self, - exc_type: Optional[Type[BaseException]] = None, - exc_value: Optional[BaseException] = None, - traceback: Optional[TracebackType] = None, - ) -> None: - await self.aclose() - - -class HTTP11ConnectionByteStream: - def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None: - self._connection = connection - self._request = request - self._closed = False - - async def __aiter__(self) -> AsyncIterator[bytes]: - kwargs = {"request": self._request} - try: - async with Trace("receive_response_body", logger, self._request, kwargs): - async for chunk in self._connection._receive_response_body(**kwargs): - yield chunk - except BaseException as exc: - # If we get an exception while streaming the response, - # we want to close the response (and possibly the connection) - # before raising that exception. - with AsyncShieldCancellation(): - await self.aclose() - raise exc - - async def aclose(self) -> None: - if not self._closed: - self._closed = True - async with Trace("response_closed", logger, self._request): - await self._connection._response_closed() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/scan_cache.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/scan_cache.py deleted file mode 100644 index 392bec966e11aa9c8a3a80f60c760c54329b403c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/commands/scan_cache.py +++ /dev/null @@ -1,138 +0,0 @@ -# coding=utf-8 -# Copyright 2022-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Contains command to scan the HF cache directory. - -Usage: - huggingface-cli scan-cache - huggingface-cli scan-cache -v - huggingface-cli scan-cache -vvv - huggingface-cli scan-cache --dir ~/.cache/huggingface/hub -""" -import time -from argparse import Namespace, _SubParsersAction -from typing import Optional - -from ..utils import CacheNotFound, HFCacheInfo, scan_cache_dir -from . import BaseHuggingfaceCLICommand -from ._cli_utils import ANSI, tabulate - - -class ScanCacheCommand(BaseHuggingfaceCLICommand): - @staticmethod - def register_subcommand(parser: _SubParsersAction): - scan_cache_parser = parser.add_parser("scan-cache", help="Scan cache directory.") - - scan_cache_parser.add_argument( - "--dir", - type=str, - default=None, - help="cache directory to scan (optional). 
Default to the default HuggingFace cache.", - ) - scan_cache_parser.add_argument( - "-v", - "--verbose", - action="count", - default=0, - help="show a more verbose output", - ) - scan_cache_parser.set_defaults(func=ScanCacheCommand) - - def __init__(self, args: Namespace) -> None: - self.verbosity: int = args.verbose - self.cache_dir: Optional[str] = args.dir - - def run(self): - try: - t0 = time.time() - hf_cache_info = scan_cache_dir(self.cache_dir) - t1 = time.time() - except CacheNotFound as exc: - cache_dir = exc.cache_dir - print(f"Cache directory not found: {cache_dir}") - return - - self._print_hf_cache_info_as_table(hf_cache_info) - - print( - f"\nDone in {round(t1-t0,1)}s. Scanned {len(hf_cache_info.repos)} repo(s)" - f" for a total of {ANSI.red(hf_cache_info.size_on_disk_str)}." - ) - if len(hf_cache_info.warnings) > 0: - message = f"Got {len(hf_cache_info.warnings)} warning(s) while scanning." - if self.verbosity >= 3: - print(ANSI.gray(message)) - for warning in hf_cache_info.warnings: - print(ANSI.gray(warning)) - else: - print(ANSI.gray(message + " Use -vvv to print details.")) - - def _print_hf_cache_info_as_table(self, hf_cache_info: HFCacheInfo) -> None: - if self.verbosity == 0: - print( - tabulate( - rows=[ - [ - repo.repo_id, - repo.repo_type, - "{:>12}".format(repo.size_on_disk_str), - repo.nb_files, - repo.last_accessed_str, - repo.last_modified_str, - ", ".join(sorted(repo.refs)), - str(repo.repo_path), - ] - for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path) - ], - headers=[ - "REPO ID", - "REPO TYPE", - "SIZE ON DISK", - "NB FILES", - "LAST_ACCESSED", - "LAST_MODIFIED", - "REFS", - "LOCAL PATH", - ], - ) - ) - else: - print( - tabulate( - rows=[ - [ - repo.repo_id, - repo.repo_type, - revision.commit_hash, - "{:>12}".format(revision.size_on_disk_str), - revision.nb_files, - revision.last_modified_str, - ", ".join(sorted(revision.refs)), - str(revision.snapshot_path), - ] - for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path) - for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash) - ], - headers=[ - "REPO ID", - "REPO TYPE", - "REVISION", - "SIZE ON DISK", - "NB FILES", - "LAST_MODIFIED", - "REFS", - "LOCAL PATH", - ], - ) - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/presets/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/presets/__init__.py deleted file mode 100644 index 1e6796a2d29eb524c5f35df79055d0969fca02ba..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/presets/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -__all__ = ("commonmark", "default", "zero", "js_default", "gfm_like") - -from . import commonmark, default, zero -from ..utils import PresetType - -js_default = default - - -class gfm_like: # noqa: N801 - """GitHub Flavoured Markdown (GFM) like. - - This adds the linkify, table and strikethrough components to CommmonMark. - - Note, it lacks task-list items and raw HTML filtering, - to meet the the full GFM specification - (see https://github.github.com/gfm/#autolinks-extension-). 
- """ - - @staticmethod - def make() -> PresetType: - config = commonmark.make() - config["components"]["core"]["rules"].append("linkify") - config["components"]["block"]["rules"].append("table") - config["components"]["inline"]["rules"].extend(["strikethrough", "linkify"]) - config["components"]["inline"]["rules2"].append("strikethrough") - config["options"]["linkify"] = True - config["options"]["html"] = True - return config diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/mpl.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/mpl.css deleted file mode 100644 index e55733d25ecfc6237aa8052c1fc2b7e3f356da23..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/web_backend/css/mpl.css +++ /dev/null @@ -1,84 +0,0 @@ -/* General styling */ -.ui-helper-clearfix:before, -.ui-helper-clearfix:after { - content: ""; - display: table; - border-collapse: collapse; -} -.ui-helper-clearfix:after { - clear: both; -} - -/* Header */ -.ui-widget-header { - border: 1px solid #dddddd; - border-top-left-radius: 6px; - border-top-right-radius: 6px; - background: #e9e9e9; - color: #333333; - font-weight: bold; -} - -/* Toolbar and items */ -.mpl-toolbar { - width: 100%; -} - -.mpl-toolbar div.mpl-button-group { - display: inline-block; -} - -.mpl-button-group + .mpl-button-group { - margin-left: 0.5em; -} - -.mpl-widget { - background-color: #fff; - border: 1px solid #ccc; - display: inline-block; - cursor: pointer; - color: #333; - padding: 6px; - vertical-align: middle; -} - -.mpl-widget:disabled, -.mpl-widget[disabled] { - background-color: #ddd; - border-color: #ddd !important; - cursor: not-allowed; -} - -.mpl-widget:disabled img, -.mpl-widget[disabled] img { - /* Convert black to grey */ - filter: contrast(0%); -} - -.mpl-widget.active img { - /* Convert black to tab:blue, approximately */ - filter: invert(34%) sepia(97%) saturate(468%) hue-rotate(162deg) brightness(96%) contrast(91%); -} - -button.mpl-widget:focus, -button.mpl-widget:hover { - background-color: #ddd; - border-color: #aaa; -} - -.mpl-button-group button.mpl-widget { - margin-left: -1px; -} -.mpl-button-group button.mpl-widget:first-child { - border-top-left-radius: 6px; - border-bottom-left-radius: 6px; - margin-left: 0px; -} -.mpl-button-group button.mpl-widget:last-child { - border-top-right-radius: 6px; - border-bottom-right-radius: 6px; -} - -select.mpl-widget { - cursor: default; -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/extension.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/extension.py deleted file mode 100644 index 61949531f37df38f74a37c00e66141313a4fd767..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/extension.py +++ /dev/null @@ -1,172 +0,0 @@ -""" -Shared methods for Index subclasses backed by ExtensionArray. 
-""" -from __future__ import annotations - -from typing import ( - TYPE_CHECKING, - Callable, - TypeVar, -) - -from pandas.util._decorators import cache_readonly - -from pandas.core.dtypes.generic import ABCDataFrame - -from pandas.core.indexes.base import Index - -if TYPE_CHECKING: - import numpy as np - - from pandas._typing import ( - ArrayLike, - npt, - ) - - from pandas.core.arrays import IntervalArray - from pandas.core.arrays._mixins import NDArrayBackedExtensionArray - -_ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex") - - -def _inherit_from_data( - name: str, delegate: type, cache: bool = False, wrap: bool = False -): - """ - Make an alias for a method of the underlying ExtensionArray. - - Parameters - ---------- - name : str - Name of an attribute the class should inherit from its EA parent. - delegate : class - cache : bool, default False - Whether to convert wrapped properties into cache_readonly - wrap : bool, default False - Whether to wrap the inherited result in an Index. - - Returns - ------- - attribute, method, property, or cache_readonly - """ - attr = getattr(delegate, name) - - if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor": - # getset_descriptor i.e. property defined in cython class - if cache: - - def cached(self): - return getattr(self._data, name) - - cached.__name__ = name - cached.__doc__ = attr.__doc__ - method = cache_readonly(cached) - - else: - - def fget(self): - result = getattr(self._data, name) - if wrap: - if isinstance(result, type(self._data)): - return type(self)._simple_new(result, name=self.name) - elif isinstance(result, ABCDataFrame): - return result.set_index(self) - return Index(result, name=self.name) - return result - - def fset(self, value) -> None: - setattr(self._data, name, value) - - fget.__name__ = name - fget.__doc__ = attr.__doc__ - - method = property(fget, fset) - - elif not callable(attr): - # just a normal attribute, no wrapping - method = attr - - else: - # error: Incompatible redefinition (redefinition with type "Callable[[Any, - # VarArg(Any), KwArg(Any)], Any]", original type "property") - def method(self, *args, **kwargs): # type: ignore[misc] - if "inplace" in kwargs: - raise ValueError(f"cannot use inplace with {type(self).__name__}") - result = attr(self._data, *args, **kwargs) - if wrap: - if isinstance(result, type(self._data)): - return type(self)._simple_new(result, name=self.name) - elif isinstance(result, ABCDataFrame): - return result.set_index(self) - return Index(result, name=self.name) - return result - - # error: "property" has no attribute "__name__" - method.__name__ = name # type: ignore[attr-defined] - method.__doc__ = attr.__doc__ - return method - - -def inherit_names( - names: list[str], delegate: type, cache: bool = False, wrap: bool = False -) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]: - """ - Class decorator to pin attributes from an ExtensionArray to a Index subclass. - - Parameters - ---------- - names : List[str] - delegate : class - cache : bool, default False - wrap : bool, default False - Whether to wrap the inherited result in an Index. - """ - - def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]: - for name in names: - meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap) - setattr(cls, name, meth) - - return cls - - return wrapper - - -class ExtensionIndex(Index): - """ - Index subclass for indexes backed by ExtensionArray. 
- """ - - # The base class already passes through to _data: - # size, __len__, dtype - - _data: IntervalArray | NDArrayBackedExtensionArray - - # --------------------------------------------------------------------- - - def _validate_fill_value(self, value): - """ - Convert value to be insertable to underlying array. - """ - return self._data._validate_setitem_value(value) - - @cache_readonly - def _isnan(self) -> npt.NDArray[np.bool_]: - # error: Incompatible return value type (got "ExtensionArray", expected - # "ndarray") - return self._data.isna() # type: ignore[return-value] - - -class NDArrayBackedExtensionIndex(ExtensionIndex): - """ - Index subclass for indexes backed by NDArrayBackedExtensionArray. - """ - - _data: NDArrayBackedExtensionArray - - def _get_engine_target(self) -> np.ndarray: - return self._data._ndarray - - def _from_join_target(self, result: np.ndarray) -> ArrayLike: - assert result.dtype == self._data._ndarray.dtype - return self._data._from_backing_data(result) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_indexing.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_indexing.py deleted file mode 100644 index cd28d519313ed36228040361dfbb2a8dccf77be5..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_indexing.py +++ /dev/null @@ -1,611 +0,0 @@ -import numpy as np -import pytest - -from pandas.errors import InvalidIndexError - -from pandas import ( - NA, - Index, - RangeIndex, - Series, - Timestamp, -) -import pandas._testing as tm -from pandas.core.arrays import ( - ArrowExtensionArray, - FloatingArray, -) - - -@pytest.fixture -def index_large(): - # large values used in Index[uint64] tests where no compat needed with Int64/Float64 - large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] - return Index(large, dtype=np.uint64) - - -class TestGetLoc: - def test_get_loc(self): - index = Index([0, 1, 2]) - assert index.get_loc(1) == 1 - - def test_get_loc_raises_bad_label(self): - index = Index([0, 1, 2]) - with pytest.raises(InvalidIndexError, match=r"\[1, 2\]"): - index.get_loc([1, 2]) - - def test_get_loc_float64(self): - idx = Index([0.0, 1.0, 2.0], dtype=np.float64) - - with pytest.raises(KeyError, match="^'foo'$"): - idx.get_loc("foo") - with pytest.raises(KeyError, match=r"^1\.5$"): - idx.get_loc(1.5) - with pytest.raises(KeyError, match="^True$"): - idx.get_loc(True) - with pytest.raises(KeyError, match="^False$"): - idx.get_loc(False) - - def test_get_loc_na(self): - idx = Index([np.nan, 1, 2], dtype=np.float64) - assert idx.get_loc(1) == 1 - assert idx.get_loc(np.nan) == 0 - - idx = Index([np.nan, 1, np.nan], dtype=np.float64) - assert idx.get_loc(1) == 1 - - # representable by slice [0:2:2] - msg = "'Cannot get left slice bound for non-unique label: nan'" - with pytest.raises(KeyError, match=msg): - idx.slice_locs(np.nan) - # not representable by slice - idx = Index([np.nan, 1, np.nan, np.nan], dtype=np.float64) - assert idx.get_loc(1) == 1 - msg = "'Cannot get left slice bound for non-unique label: nan" - with pytest.raises(KeyError, match=msg): - idx.slice_locs(np.nan) - - def test_get_loc_missing_nan(self): - # GH#8569 - idx = Index([1, 2], dtype=np.float64) - assert idx.get_loc(1) == 0 - with pytest.raises(KeyError, match=r"^3$"): - idx.get_loc(3) - with pytest.raises(KeyError, match="^nan$"): - idx.get_loc(np.nan) - with 
pytest.raises(InvalidIndexError, match=r"\[nan\]"): - # listlike/non-hashable raises TypeError - idx.get_loc([np.nan]) - - @pytest.mark.parametrize("vals", [[1], [1.0], [Timestamp("2019-12-31")], ["test"]]) - def test_get_loc_float_index_nan_with_method(self, vals): - # GH#39382 - idx = Index(vals) - with pytest.raises(KeyError, match="nan"): - idx.get_loc(np.nan) - - @pytest.mark.parametrize("dtype", ["f8", "i8", "u8"]) - def test_get_loc_numericindex_none_raises(self, dtype): - # case that goes through searchsorted and key is non-comparable to values - arr = np.arange(10**7, dtype=dtype) - idx = Index(arr) - with pytest.raises(KeyError, match="None"): - idx.get_loc(None) - - def test_get_loc_overflows(self): - # unique but non-monotonic goes through IndexEngine.mapping.get_item - idx = Index([0, 2, 1]) - - val = np.iinfo(np.int64).max + 1 - - with pytest.raises(KeyError, match=str(val)): - idx.get_loc(val) - with pytest.raises(KeyError, match=str(val)): - idx._engine.get_loc(val) - - -class TestGetIndexer: - def test_get_indexer(self): - index1 = Index([1, 2, 3, 4, 5]) - index2 = Index([2, 4, 6]) - - r1 = index1.get_indexer(index2) - e1 = np.array([1, 3, -1], dtype=np.intp) - tm.assert_almost_equal(r1, e1) - - @pytest.mark.parametrize("reverse", [True, False]) - @pytest.mark.parametrize( - "expected,method", - [ - (np.array([-1, 0, 0, 1, 1], dtype=np.intp), "pad"), - (np.array([-1, 0, 0, 1, 1], dtype=np.intp), "ffill"), - (np.array([0, 0, 1, 1, 2], dtype=np.intp), "backfill"), - (np.array([0, 0, 1, 1, 2], dtype=np.intp), "bfill"), - ], - ) - def test_get_indexer_methods(self, reverse, expected, method): - index1 = Index([1, 2, 3, 4, 5]) - index2 = Index([2, 4, 6]) - - if reverse: - index1 = index1[::-1] - expected = expected[::-1] - - result = index2.get_indexer(index1, method=method) - tm.assert_almost_equal(result, expected) - - def test_get_indexer_invalid(self): - # GH10411 - index = Index(np.arange(10)) - - with pytest.raises(ValueError, match="tolerance argument"): - index.get_indexer([1, 0], tolerance=1) - - with pytest.raises(ValueError, match="limit argument"): - index.get_indexer([1, 0], limit=1) - - @pytest.mark.parametrize( - "method, tolerance, indexer, expected", - [ - ("pad", None, [0, 5, 9], [0, 5, 9]), - ("backfill", None, [0, 5, 9], [0, 5, 9]), - ("nearest", None, [0, 5, 9], [0, 5, 9]), - ("pad", 0, [0, 5, 9], [0, 5, 9]), - ("backfill", 0, [0, 5, 9], [0, 5, 9]), - ("nearest", 0, [0, 5, 9], [0, 5, 9]), - ("pad", None, [0.2, 1.8, 8.5], [0, 1, 8]), - ("backfill", None, [0.2, 1.8, 8.5], [1, 2, 9]), - ("nearest", None, [0.2, 1.8, 8.5], [0, 2, 9]), - ("pad", 1, [0.2, 1.8, 8.5], [0, 1, 8]), - ("backfill", 1, [0.2, 1.8, 8.5], [1, 2, 9]), - ("nearest", 1, [0.2, 1.8, 8.5], [0, 2, 9]), - ("pad", 0.2, [0.2, 1.8, 8.5], [0, -1, -1]), - ("backfill", 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]), - ("nearest", 0.2, [0.2, 1.8, 8.5], [0, 2, -1]), - ], - ) - def test_get_indexer_nearest(self, method, tolerance, indexer, expected): - index = Index(np.arange(10)) - - actual = index.get_indexer(indexer, method=method, tolerance=tolerance) - tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) - - @pytest.mark.parametrize("listtype", [list, tuple, Series, np.array]) - @pytest.mark.parametrize( - "tolerance, expected", - list( - zip( - [[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]], - [[0, 2, -1], [0, -1, -1], [-1, 2, 9]], - ) - ), - ) - def test_get_indexer_nearest_listlike_tolerance( - self, tolerance, expected, listtype - ): - index = Index(np.arange(10)) - - actual = 
index.get_indexer( - [0.2, 1.8, 8.5], method="nearest", tolerance=listtype(tolerance) - ) - tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) - - def test_get_indexer_nearest_error(self): - index = Index(np.arange(10)) - with pytest.raises(ValueError, match="limit argument"): - index.get_indexer([1, 0], method="nearest", limit=1) - - with pytest.raises(ValueError, match="tolerance size must match"): - index.get_indexer([1, 0], method="nearest", tolerance=[1, 2, 3]) - - @pytest.mark.parametrize( - "method,expected", - [("pad", [8, 7, 0]), ("backfill", [9, 8, 1]), ("nearest", [9, 7, 0])], - ) - def test_get_indexer_nearest_decreasing(self, method, expected): - index = Index(np.arange(10))[::-1] - - actual = index.get_indexer([0, 5, 9], method=method) - tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp)) - - actual = index.get_indexer([0.2, 1.8, 8.5], method=method) - tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) - - @pytest.mark.parametrize("idx_dtype", ["int64", "float64", "uint64", "range"]) - @pytest.mark.parametrize("method", ["get_indexer", "get_indexer_non_unique"]) - def test_get_indexer_numeric_index_boolean_target(self, method, idx_dtype): - # GH 16877 - - if idx_dtype == "range": - numeric_index = RangeIndex(4) - else: - numeric_index = Index(np.arange(4, dtype=idx_dtype)) - - other = Index([True, False, True]) - - result = getattr(numeric_index, method)(other) - expected = np.array([-1, -1, -1], dtype=np.intp) - if method == "get_indexer": - tm.assert_numpy_array_equal(result, expected) - else: - missing = np.arange(3, dtype=np.intp) - tm.assert_numpy_array_equal(result[0], expected) - tm.assert_numpy_array_equal(result[1], missing) - - @pytest.mark.parametrize("method", ["pad", "backfill", "nearest"]) - def test_get_indexer_with_method_numeric_vs_bool(self, method): - left = Index([1, 2, 3]) - right = Index([True, False]) - - with pytest.raises(TypeError, match="Cannot compare"): - left.get_indexer(right, method=method) - - with pytest.raises(TypeError, match="Cannot compare"): - right.get_indexer(left, method=method) - - def test_get_indexer_numeric_vs_bool(self): - left = Index([1, 2, 3]) - right = Index([True, False]) - - res = left.get_indexer(right) - expected = -1 * np.ones(len(right), dtype=np.intp) - tm.assert_numpy_array_equal(res, expected) - - res = right.get_indexer(left) - expected = -1 * np.ones(len(left), dtype=np.intp) - tm.assert_numpy_array_equal(res, expected) - - res = left.get_indexer_non_unique(right)[0] - expected = -1 * np.ones(len(right), dtype=np.intp) - tm.assert_numpy_array_equal(res, expected) - - res = right.get_indexer_non_unique(left)[0] - expected = -1 * np.ones(len(left), dtype=np.intp) - tm.assert_numpy_array_equal(res, expected) - - def test_get_indexer_float64(self): - idx = Index([0.0, 1.0, 2.0], dtype=np.float64) - tm.assert_numpy_array_equal( - idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp) - ) - - target = [-0.1, 0.5, 1.1] - tm.assert_numpy_array_equal( - idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) - ) - - def test_get_indexer_nan(self): - # GH#7820 - result = Index([1, 2, np.nan], dtype=np.float64).get_indexer([np.nan]) - expected = np.array([2], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - - def 
test_get_indexer_int64(self): - index = Index(range(0, 20, 2), dtype=np.int64) - target = Index(np.arange(10), dtype=np.int64) - indexer = index.get_indexer(target) - expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - target = Index(np.arange(10), dtype=np.int64) - indexer = index.get_indexer(target, method="pad") - expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - target = Index(np.arange(10), dtype=np.int64) - indexer = index.get_indexer(target, method="backfill") - expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - def test_get_indexer_uint64(self, index_large): - target = Index(np.arange(10).astype("uint64") * 5 + 2**63) - indexer = index_large.get_indexer(target) - expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - target = Index(np.arange(10).astype("uint64") * 5 + 2**63) - indexer = index_large.get_indexer(target, method="pad") - expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - target = Index(np.arange(10).astype("uint64") * 5 + 2**63) - indexer = index_large.get_indexer(target, method="backfill") - expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - @pytest.mark.parametrize("val, val2", [(4, 5), (4, 4), (4, NA), (NA, NA)]) - def test_get_loc_masked(self, val, val2, any_numeric_ea_and_arrow_dtype): - # GH#39133 - idx = Index([1, 2, 3, val, val2], dtype=any_numeric_ea_and_arrow_dtype) - result = idx.get_loc(2) - assert result == 1 - - with pytest.raises(KeyError, match="9"): - idx.get_loc(9) - - def test_get_loc_masked_na(self, any_numeric_ea_and_arrow_dtype): - # GH#39133 - idx = Index([1, 2, NA], dtype=any_numeric_ea_and_arrow_dtype) - result = idx.get_loc(NA) - assert result == 2 - - idx = Index([1, 2, NA, NA], dtype=any_numeric_ea_and_arrow_dtype) - result = idx.get_loc(NA) - tm.assert_numpy_array_equal(result, np.array([False, False, True, True])) - - idx = Index([1, 2, 3], dtype=any_numeric_ea_and_arrow_dtype) - with pytest.raises(KeyError, match="NA"): - idx.get_loc(NA) - - def test_get_loc_masked_na_and_nan(self): - # GH#39133 - idx = Index( - FloatingArray( - np.array([1, 2, 1, np.nan]), mask=np.array([False, False, True, False]) - ) - ) - result = idx.get_loc(NA) - assert result == 2 - result = idx.get_loc(np.nan) - assert result == 3 - - idx = Index( - FloatingArray(np.array([1, 2, 1.0]), mask=np.array([False, False, True])) - ) - result = idx.get_loc(NA) - assert result == 2 - with pytest.raises(KeyError, match="nan"): - idx.get_loc(np.nan) - - idx = Index( - FloatingArray( - np.array([1, 2, np.nan]), mask=np.array([False, False, False]) - ) - ) - result = idx.get_loc(np.nan) - assert result == 2 - with pytest.raises(KeyError, match="NA"): - idx.get_loc(NA) - - @pytest.mark.parametrize("val", [4, 2]) - def test_get_indexer_masked_na(self, any_numeric_ea_and_arrow_dtype, val): - # GH#39133 - idx = Index([1, 2, NA, 3, val], dtype=any_numeric_ea_and_arrow_dtype) - result = idx.get_indexer_for([1, NA, 5]) - expected = np.array([0, 2, -1]) - tm.assert_numpy_array_equal(result, expected, check_dtype=False) - - @pytest.mark.parametrize("dtype", ["boolean", "bool[pyarrow]"]) - def test_get_indexer_masked_na_boolean(self, dtype): - # GH#39133 - if dtype == 
"bool[pyarrow]": - pytest.importorskip("pyarrow") - idx = Index([True, False, NA], dtype=dtype) - result = idx.get_loc(False) - assert result == 1 - result = idx.get_loc(NA) - assert result == 2 - - def test_get_indexer_arrow_dictionary_target(self): - pa = pytest.importorskip("pyarrow") - target = Index( - ArrowExtensionArray( - pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8())) - ) - ) - idx = Index([1]) - - result = idx.get_indexer(target) - expected = np.array([0, -1], dtype=np.int64) - tm.assert_numpy_array_equal(result, expected) - - result_1, result_2 = idx.get_indexer_non_unique(target) - expected_1, expected_2 = np.array([0, -1], dtype=np.int64), np.array( - [1], dtype=np.int64 - ) - tm.assert_numpy_array_equal(result_1, expected_1) - tm.assert_numpy_array_equal(result_2, expected_2) - - -class TestWhere: - @pytest.mark.parametrize( - "index", - [ - Index(np.arange(5, dtype="float64")), - Index(range(0, 20, 2), dtype=np.int64), - Index(np.arange(5, dtype="uint64")), - ], - ) - def test_where(self, listlike_box, index): - cond = [True] * len(index) - expected = index - result = index.where(listlike_box(cond)) - - cond = [False] + [True] * (len(index) - 1) - expected = Index([index._na_value] + index[1:].tolist(), dtype=np.float64) - result = index.where(listlike_box(cond)) - tm.assert_index_equal(result, expected) - - def test_where_uint64(self): - idx = Index([0, 6, 2], dtype=np.uint64) - mask = np.array([False, True, False]) - other = np.array([1], dtype=np.int64) - - expected = Index([1, 6, 1], dtype=np.uint64) - - result = idx.where(mask, other) - tm.assert_index_equal(result, expected) - - result = idx.putmask(~mask, other) - tm.assert_index_equal(result, expected) - - def test_where_infers_type_instead_of_trying_to_convert_string_to_float(self): - # GH 32413 - index = Index([1, np.nan]) - cond = index.notna() - other = Index(["a", "b"], dtype="string") - - expected = Index([1.0, "b"]) - result = index.where(cond, other) - - tm.assert_index_equal(result, expected) - - -class TestTake: - @pytest.mark.parametrize("idx_dtype", [np.float64, np.int64, np.uint64]) - def test_take_preserve_name(self, idx_dtype): - index = Index([1, 2, 3, 4], dtype=idx_dtype, name="foo") - taken = index.take([3, 0, 1]) - assert index.name == taken.name - - def test_take_fill_value_float64(self): - # GH 12631 - idx = Index([1.0, 2.0, 3.0], name="xxx", dtype=np.float64) - result = idx.take(np.array([1, 0, -1])) - expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx") - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = Index([2.0, 1.0, np.nan], dtype=np.float64, name="xxx") - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) - expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx") - tm.assert_index_equal(result, expected) - - msg = ( - "When allow_fill=True and fill_value is not None, " - "all indices must be >= -1" - ) - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - msg = "index -5 is out of bounds for (axis 0 with )?size 3" - with pytest.raises(IndexError, match=msg): - idx.take(np.array([1, -5])) - - @pytest.mark.parametrize("dtype", [np.int64, np.uint64]) - def test_take_fill_value_ints(self, dtype): - # see gh-12631 - idx = Index([1, 2, 3], dtype=dtype, name="xxx") 
- result = idx.take(np.array([1, 0, -1])) - expected = Index([2, 1, 3], dtype=dtype, name="xxx") - tm.assert_index_equal(result, expected) - - name = type(idx).__name__ - msg = f"Unable to fill values because {name} cannot contain NA" - - # fill_value=True - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -1]), fill_value=True) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) - expected = Index([2, 1, 3], dtype=dtype, name="xxx") - tm.assert_index_equal(result, expected) - - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - msg = "index -5 is out of bounds for (axis 0 with )?size 3" - with pytest.raises(IndexError, match=msg): - idx.take(np.array([1, -5])) - - -class TestContains: - @pytest.mark.parametrize("dtype", [np.float64, np.int64, np.uint64]) - def test_contains_none(self, dtype): - # GH#35788 should return False, not raise TypeError - index = Index([0, 1, 2, 3, 4], dtype=dtype) - assert None not in index - - def test_contains_float64_nans(self): - index = Index([1.0, 2.0, np.nan], dtype=np.float64) - assert np.nan in index - - def test_contains_float64_not_nans(self): - index = Index([1.0, 2.0, np.nan], dtype=np.float64) - assert 1.0 in index - - -class TestSliceLocs: - @pytest.mark.parametrize("dtype", [int, float]) - def test_slice_locs(self, dtype): - index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) - n = len(index) - - assert index.slice_locs(start=2) == (2, n) - assert index.slice_locs(start=3) == (3, n) - assert index.slice_locs(3, 8) == (3, 6) - assert index.slice_locs(5, 10) == (3, n) - assert index.slice_locs(end=8) == (0, 6) - assert index.slice_locs(end=9) == (0, 7) - - # reversed - index2 = index[::-1] - assert index2.slice_locs(8, 2) == (2, 6) - assert index2.slice_locs(7, 3) == (2, 5) - - @pytest.mark.parametrize("dtype", [int, float]) - def test_slice_locs_float_locs(self, dtype): - index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) - n = len(index) - assert index.slice_locs(5.0, 10.0) == (3, n) - assert index.slice_locs(4.5, 10.5) == (3, 8) - - index2 = index[::-1] - assert index2.slice_locs(8.5, 1.5) == (2, 6) - assert index2.slice_locs(10.5, -1) == (0, n) - - @pytest.mark.parametrize("dtype", [int, float]) - def test_slice_locs_dup_numeric(self, dtype): - index = Index(np.array([10, 12, 12, 14], dtype=dtype)) - assert index.slice_locs(12, 12) == (1, 3) - assert index.slice_locs(11, 13) == (1, 3) - - index2 = index[::-1] - assert index2.slice_locs(12, 12) == (1, 3) - assert index2.slice_locs(13, 11) == (1, 3) - - def test_slice_locs_na(self): - index = Index([np.nan, 1, 2]) - assert index.slice_locs(1) == (1, 3) - assert index.slice_locs(np.nan) == (0, 3) - - index = Index([0, np.nan, np.nan, 1, 2]) - assert index.slice_locs(np.nan) == (1, 5) - - def test_slice_locs_na_raises(self): - index = Index([np.nan, 1, 2]) - with pytest.raises(KeyError, match=""): - index.slice_locs(start=1.5) - - with pytest.raises(KeyError, match=""): - index.slice_locs(end=1.5) - - -class TestGetSliceBounds: - @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) - def test_get_slice_bounds_within(self, side, expected): - index = Index(range(6)) - result = index.get_slice_bound(4, side=side) - assert result == expected - - @pytest.mark.parametrize("side", ["left", "right"]) - @pytest.mark.parametrize("bound, expected", [(-1, 0), 
(10, 6)]) - def test_get_slice_bounds_outside(self, side, expected, bound): - index = Index(range(6)) - result = index.get_slice_bound(bound, side=side) - assert result == expected diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_odswriter.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_odswriter.py deleted file mode 100644 index 21d31ec8a7fb514e0c71d0120047f4ae5112e303..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/excel/test_odswriter.py +++ /dev/null @@ -1,49 +0,0 @@ -import re - -import pytest - -import pandas._testing as tm - -from pandas.io.excel import ExcelWriter - -odf = pytest.importorskip("odf") - -pytestmark = pytest.mark.parametrize("ext", [".ods"]) - - -def test_write_append_mode_raises(ext): - msg = "Append mode is not supported with odf!" - - with tm.ensure_clean(ext) as f: - with pytest.raises(ValueError, match=msg): - ExcelWriter(f, engine="odf", mode="a") - - -@pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}]) -def test_engine_kwargs(ext, engine_kwargs): - # GH 42286 - # GH 43445 - # test for error: OpenDocumentSpreadsheet does not accept any arguments - with tm.ensure_clean(ext) as f: - if engine_kwargs is not None: - error = re.escape( - "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'" - ) - with pytest.raises( - TypeError, - match=error, - ): - ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) - else: - with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _: - pass - - -def test_book_and_sheets_consistent(ext): - # GH#45687 - Ensure sheets is updated if user modifies book - with tm.ensure_clean(ext) as f: - with ExcelWriter(f) as writer: - assert writer.sheets == {} - table = odf.table.Table(name="test_name") - writer.book.spreadsheet.addElement(table) - assert writer.sheets == {"test_name": table} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/test_printing.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/test_printing.py deleted file mode 100644 index 2d0dc0d9377097329664db177ef4003d6e44d4d9..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/formats/test_printing.py +++ /dev/null @@ -1,248 +0,0 @@ -import string - -import numpy as np -import pytest - -import pandas._config.config as cf - -import pandas as pd - -from pandas.io.formats import printing -import pandas.io.formats.format as fmt - - -def test_adjoin(): - data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]] - expected = "a dd ggg\nb ee hhh\nc ff iii" - - adjoined = printing.adjoin(2, *data) - - assert adjoined == expected - - -def test_repr_binary_type(): - letters = string.ascii_letters - try: - raw = bytes(letters, encoding=cf.get_option("display.encoding")) - except TypeError: - raw = bytes(letters) - b = str(raw.decode("utf-8")) - res = printing.pprint_thing(b, quote_strings=True) - assert res == repr(b) - res = printing.pprint_thing(b, quote_strings=False) - assert res == b - - -class TestFormattBase: - def test_adjoin(self): - data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]] - expected = "a dd ggg\nb ee hhh\nc ff iii" - - adjoined = printing.adjoin(2, *data) - - assert adjoined == expected - - def test_adjoin_unicode(self): - data = [["あ", "b", "c"], ["dd", "ええ", 
"ff"], ["ggg", "hhh", "いいい"]] - expected = "あ dd ggg\nb ええ hhh\nc ff いいい" - adjoined = printing.adjoin(2, *data) - assert adjoined == expected - - adj = fmt.EastAsianTextAdjustment() - - expected = """あ dd ggg -b ええ hhh -c ff いいい""" - - adjoined = adj.adjoin(2, *data) - assert adjoined == expected - cols = adjoined.split("\n") - assert adj.len(cols[0]) == 13 - assert adj.len(cols[1]) == 13 - assert adj.len(cols[2]) == 16 - - expected = """あ dd ggg -b ええ hhh -c ff いいい""" - - adjoined = adj.adjoin(7, *data) - assert adjoined == expected - cols = adjoined.split("\n") - assert adj.len(cols[0]) == 23 - assert adj.len(cols[1]) == 23 - assert adj.len(cols[2]) == 26 - - def test_justify(self): - adj = fmt.EastAsianTextAdjustment() - - def just(x, *args, **kwargs): - # wrapper to test single str - return adj.justify([x], *args, **kwargs)[0] - - assert just("abc", 5, mode="left") == "abc " - assert just("abc", 5, mode="center") == " abc " - assert just("abc", 5, mode="right") == " abc" - assert just("abc", 5, mode="left") == "abc " - assert just("abc", 5, mode="center") == " abc " - assert just("abc", 5, mode="right") == " abc" - - assert just("パンダ", 5, mode="left") == "パンダ" - assert just("パンダ", 5, mode="center") == "パンダ" - assert just("パンダ", 5, mode="right") == "パンダ" - - assert just("パンダ", 10, mode="left") == "パンダ " - assert just("パンダ", 10, mode="center") == " パンダ " - assert just("パンダ", 10, mode="right") == " パンダ" - - def test_east_asian_len(self): - adj = fmt.EastAsianTextAdjustment() - - assert adj.len("abc") == 3 - assert adj.len("abc") == 3 - - assert adj.len("パンダ") == 6 - assert adj.len("パンダ") == 5 - assert adj.len("パンダpanda") == 11 - assert adj.len("パンダpanda") == 10 - - def test_ambiguous_width(self): - adj = fmt.EastAsianTextAdjustment() - assert adj.len("¡¡ab") == 4 - - with cf.option_context("display.unicode.ambiguous_as_wide", True): - adj = fmt.EastAsianTextAdjustment() - assert adj.len("¡¡ab") == 6 - - data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "¡¡ab", "いいい"]] - expected = "あ dd ggg \nb ええ ¡¡ab\nc ff いいい" - adjoined = adj.adjoin(2, *data) - assert adjoined == expected - - -class TestTableSchemaRepr: - def test_publishes(self, ip): - ipython = ip.instance(config=ip.config) - df = pd.DataFrame({"A": [1, 2]}) - objects = [df["A"], df] # dataframe / series - expected_keys = [ - {"text/plain", "application/vnd.dataresource+json"}, - {"text/plain", "text/html", "application/vnd.dataresource+json"}, - ] - - opt = pd.option_context("display.html.table_schema", True) - last_obj = None - for obj, expected in zip(objects, expected_keys): - last_obj = obj - with opt: - formatted = ipython.display_formatter.format(obj) - assert set(formatted[0].keys()) == expected - - with_latex = pd.option_context("styler.render.repr", "latex") - - with opt, with_latex: - formatted = ipython.display_formatter.format(last_obj) - - expected = { - "text/plain", - "text/html", - "text/latex", - "application/vnd.dataresource+json", - } - assert set(formatted[0].keys()) == expected - - def test_publishes_not_implemented(self, ip): - # column MultiIndex - # GH 15996 - midx = pd.MultiIndex.from_product([["A", "B"], ["a", "b", "c"]]) - df = pd.DataFrame( - np.random.default_rng(2).standard_normal((5, len(midx))), columns=midx - ) - - opt = pd.option_context("display.html.table_schema", True) - - with opt: - formatted = ip.instance(config=ip.config).display_formatter.format(df) - - expected = {"text/plain", "text/html"} - assert set(formatted[0].keys()) == expected - - def test_config_on(self): - df = 
pd.DataFrame({"A": [1, 2]}) - with pd.option_context("display.html.table_schema", True): - result = df._repr_data_resource_() - - assert result is not None - - def test_config_default_off(self): - df = pd.DataFrame({"A": [1, 2]}) - with pd.option_context("display.html.table_schema", False): - result = df._repr_data_resource_() - - assert result is None - - def test_enable_data_resource_formatter(self, ip): - # GH 10491 - formatters = ip.instance(config=ip.config).display_formatter.formatters - mimetype = "application/vnd.dataresource+json" - - with pd.option_context("display.html.table_schema", True): - assert "application/vnd.dataresource+json" in formatters - assert formatters[mimetype].enabled - - # still there, just disabled - assert "application/vnd.dataresource+json" in formatters - assert not formatters[mimetype].enabled - - # able to re-set - with pd.option_context("display.html.table_schema", True): - assert "application/vnd.dataresource+json" in formatters - assert formatters[mimetype].enabled - # smoke test that it works - ip.instance(config=ip.config).display_formatter.format(cf) - - -def test_multiindex_long_element(): - # Non-regression test towards GH #52960 - data = pd.MultiIndex.from_tuples([("c" * 62,)]) - - expected = ( - "MultiIndex([('cccccccccccccccccccccccccccccccccccccccc" - "cccccccccccccccccccccc',)],\n )" - ) - assert str(data) == expected - - -@pytest.mark.parametrize( - "data,output", - [ - ([2, complex("nan"), 1], [" 2.0+0.0j", " NaN+0.0j", " 1.0+0.0j"]), - ([2, complex("nan"), -1], [" 2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]), - ([-2, complex("nan"), -1], ["-2.0+0.0j", " NaN+0.0j", "-1.0+0.0j"]), - ([-1.23j, complex("nan"), -1], ["-0.00-1.23j", " NaN+0.00j", "-1.00+0.00j"]), - ([1.23j, complex("nan"), 1.23], [" 0.00+1.23j", " NaN+0.00j", " 1.23+0.00j"]), - ( - [-1.23j, complex(np.nan, np.nan), 1], - ["-0.00-1.23j", " NaN+ NaNj", " 1.00+0.00j"], - ), - ( - [-1.23j, complex(1.2, np.nan), 1], - ["-0.00-1.23j", " 1.20+ NaNj", " 1.00+0.00j"], - ), - ( - [-1.23j, complex(np.nan, -1.2), 1], - ["-0.00-1.23j", " NaN-1.20j", " 1.00+0.00j"], - ), - ], -) -@pytest.mark.parametrize("as_frame", [True, False]) -def test_ser_df_with_complex_nans(data, output, as_frame): - # GH#53762, GH#53841 - obj = pd.Series(np.array(data)) - if as_frame: - obj = obj.to_frame(name="val") - reprs = [f"{i} {val}" for i, val in enumerate(output)] - expected = f"{'val': >{len(reprs[0])}}\n" + "\n".join(reprs) - else: - reprs = [f"{i} {val}" for i, val in enumerate(output)] - expected = "\n".join(reprs) + "\ndtype: complex128" - assert str(obj) == expected, f"\n{str(obj)}\n\n{expected}" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/scdoc.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/scdoc.py deleted file mode 100644 index 90478acf6bd968e5c9520ab7ca7eac1aeb60863a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/scdoc.py +++ /dev/null @@ -1,86 +0,0 @@ -""" - pygments.lexers.scdoc - ~~~~~~~~~~~~~~~~~~~~~ - - Lexer for scdoc, a simple man page generator. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.lexer import RegexLexer, include, bygroups, using, this -from pygments.token import Text, Comment, Keyword, String, Generic - -__all__ = ['ScdocLexer'] - - -class ScdocLexer(RegexLexer): - """ - `scdoc` is a simple man page generator for POSIX systems written in C99. - - .. versionadded:: 2.5 - """ - name = 'scdoc' - url = 'https://git.sr.ht/~sircmpwn/scdoc' - aliases = ['scdoc', 'scd'] - filenames = ['*.scd', '*.scdoc'] - flags = re.MULTILINE - - tokens = { - 'root': [ - # comment - (r'^(;.+\n)', bygroups(Comment)), - - # heading with pound prefix - (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)), - (r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)), - # bulleted lists - (r'^(\s*)([*-])(\s)(.+\n)', - bygroups(Text, Keyword, Text, using(this, state='inline'))), - # numbered lists - (r'^(\s*)(\.+\.)( .+\n)', - bygroups(Text, Keyword, using(this, state='inline'))), - # quote - (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)), - # text block - (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)), - - include('inline'), - ], - 'inline': [ - # escape - (r'\\.', Text), - # underlines - (r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)), - # bold - (r'(\s)(\*[^*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)), - # inline code - (r'`[^`]+`', String.Backtick), - - # general text, must come last! - (r'[^\\\s]+', Text), - (r'.', Text), - ], - } - - def analyse_text(text): - """We checks for bold and underline text with * and _. Also - every scdoc file must start with a strictly defined first line.""" - result = 0 - - if '*' in text: - result += 0.01 - - if '_' in text: - result += 0.01 - - # name(section) ["left_footer" ["center_header"]] - first_line = text.partition('\n')[0] - scdoc_preamble_pattern = r'^.*\([1-7]\)( "[^"]+"){0,2}$' - - if re.search(scdoc_preamble_pattern, first_line): - result += 0.5 - - return result diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/smv.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/smv.py deleted file mode 100644 index 2584086e24be71ebc53185a148d6024df8fa7788..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/smv.py +++ /dev/null @@ -1,78 +0,0 @@ -""" - pygments.lexers.smv - ~~~~~~~~~~~~~~~~~~~ - - Lexers for the SMV languages. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, words -from pygments.token import Comment, Keyword, Name, Number, Operator, \ - Punctuation, Text - -__all__ = ['NuSMVLexer'] - - -class NuSMVLexer(RegexLexer): - """ - Lexer for the NuSMV language. - - .. 
versionadded:: 2.2 - """ - - name = 'NuSMV' - aliases = ['nusmv'] - filenames = ['*.smv'] - mimetypes = [] - - tokens = { - 'root': [ - # Comments - (r'(?s)\/\-\-.*?\-\-/', Comment), - (r'--.*\n', Comment), - - # Reserved - (words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR', - 'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC', - 'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC', - 'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN', - 'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF', - 'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED', - 'PREDICATES'), suffix=r'(?![\w$#-])'), - Keyword.Declaration), - (r'process(?![\w$#-])', Keyword), - (words(('array', 'of', 'boolean', 'integer', 'real', 'word'), - suffix=r'(?![\w$#-])'), Keyword.Type), - (words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword), - (words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize', - 'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count', - 'abs', 'max', 'min'), suffix=r'(?![\w$#-])'), - Name.Builtin), - (words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G', - 'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF', - 'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor', - 'xnor'), suffix=r'(?![\w$#-])'), - Operator.Word), - (words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant), - - # Names - (r'[a-zA-Z_][\w$#-]*', Name.Variable), - - # Operators - (r':=', Operator), - (r'[-&|+*/<>!=]', Operator), - - # Literals - (r'\-?\d+\b', Number.Integer), - (r'0[su][bB]\d*_[01_]+', Number.Bin), - (r'0[su][oO]\d*_[0-7_]+', Number.Oct), - (r'0[su][dD]\d*_[\d_]+', Number.Decimal), - (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex), - - # Whitespace, punctuation and the rest - (r'\s+', Text.Whitespace), - (r'[()\[\]{};?:.,]', Punctuation), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/server.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/server.py deleted file mode 100644 index 3e0db9d011423abb60320154fcf46c282cc3c349..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/server.py +++ /dev/null @@ -1,334 +0,0 @@ -import asyncio -import logging -import os -import platform -import signal -import socket -import sys -import threading -import time -from email.utils import formatdate -from types import FrameType -from typing import TYPE_CHECKING, List, Optional, Sequence, Set, Tuple, Union - -import click - -from uvicorn.config import Config - -if TYPE_CHECKING: - from uvicorn.protocols.http.h11_impl import H11Protocol - from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol - from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol - from uvicorn.protocols.websockets.wsproto_impl import WSProtocol - - Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol] - - -HANDLED_SIGNALS = ( - signal.SIGINT, # Unix signal 2. Sent by Ctrl+C. - signal.SIGTERM, # Unix signal 15. Sent by `kill `. -) -if sys.platform == "win32": # pragma: py-not-win32 - HANDLED_SIGNALS += (signal.SIGBREAK,) # Windows signal 21. Sent by Ctrl+Break. - -logger = logging.getLogger("uvicorn.error") - - -class ServerState: - """ - Shared servers state that is available between all protocol instances. 
- """ - - def __init__(self) -> None: - self.total_requests = 0 - self.connections: Set["Protocols"] = set() - self.tasks: Set[asyncio.Task] = set() - self.default_headers: List[Tuple[bytes, bytes]] = [] - - -class Server: - def __init__(self, config: Config) -> None: - self.config = config - self.server_state = ServerState() - - self.started = False - self.should_exit = False - self.force_exit = False - self.last_notified = 0.0 - - def run(self, sockets: Optional[List[socket.socket]] = None) -> None: - self.config.setup_event_loop() - return asyncio.run(self.serve(sockets=sockets)) - - async def serve(self, sockets: Optional[List[socket.socket]] = None) -> None: - process_id = os.getpid() - - config = self.config - if not config.loaded: - config.load() - - self.lifespan = config.lifespan_class(config) - - self.install_signal_handlers() - - message = "Started server process [%d]" - color_message = "Started server process [" + click.style("%d", fg="cyan") + "]" - logger.info(message, process_id, extra={"color_message": color_message}) - - await self.startup(sockets=sockets) - if self.should_exit: - return - await self.main_loop() - await self.shutdown(sockets=sockets) - - message = "Finished server process [%d]" - color_message = "Finished server process [" + click.style("%d", fg="cyan") + "]" - logger.info(message, process_id, extra={"color_message": color_message}) - - async def startup(self, sockets: Optional[List[socket.socket]] = None) -> None: - await self.lifespan.startup() - if self.lifespan.should_exit: - self.should_exit = True - return - - config = self.config - - def create_protocol( - _loop: Optional[asyncio.AbstractEventLoop] = None, - ) -> asyncio.Protocol: - return config.http_protocol_class( # type: ignore[call-arg] - config=config, - server_state=self.server_state, - app_state=self.lifespan.state, - _loop=_loop, - ) - - loop = asyncio.get_running_loop() - - listeners: Sequence[socket.SocketType] - if sockets is not None: - # Explicitly passed a list of open sockets. - # We use this when the server is run from a Gunicorn worker. - - def _share_socket( - sock: socket.SocketType, - ) -> socket.SocketType: # pragma py-linux pragma: py-darwin - # Windows requires the socket be explicitly shared across - # multiple workers (processes). - from socket import fromshare # type: ignore[attr-defined] - - sock_data = sock.share(os.getpid()) # type: ignore[attr-defined] - return fromshare(sock_data) - - self.servers: List[asyncio.base_events.Server] = [] - for sock in sockets: - is_windows = platform.system() == "Windows" - if config.workers > 1 and is_windows: # pragma: py-not-win32 - sock = _share_socket(sock) # type: ignore[assignment] - server = await loop.create_server( - create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog - ) - self.servers.append(server) - listeners = sockets - - elif config.fd is not None: # pragma: py-win32 - # Use an existing socket, from a file descriptor. - sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM) - server = await loop.create_server( - create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog - ) - assert server.sockets is not None # mypy - listeners = server.sockets - self.servers = [server] - - elif config.uds is not None: # pragma: py-win32 - # Create a socket using UNIX domain socket. 
- uds_perms = 0o666 - if os.path.exists(config.uds): - uds_perms = os.stat(config.uds).st_mode - server = await loop.create_unix_server( - create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog - ) - os.chmod(config.uds, uds_perms) - assert server.sockets is not None # mypy - listeners = server.sockets - self.servers = [server] - - else: - # Standard case. Create a socket from a host/port pair. - try: - server = await loop.create_server( - create_protocol, - host=config.host, - port=config.port, - ssl=config.ssl, - backlog=config.backlog, - ) - except OSError as exc: - logger.error(exc) - await self.lifespan.shutdown() - sys.exit(1) - - assert server.sockets is not None - listeners = server.sockets - self.servers = [server] - - if sockets is None: - self._log_started_message(listeners) - else: - # We're most likely running multiple workers, so a message has already been - # logged by `config.bind_socket()`. - pass - - self.started = True - - def _log_started_message(self, listeners: Sequence[socket.SocketType]) -> None: - config = self.config - - if config.fd is not None: # pragma: py-win32 - sock = listeners[0] - logger.info( - "Uvicorn running on socket %s (Press CTRL+C to quit)", - sock.getsockname(), - ) - - elif config.uds is not None: # pragma: py-win32 - logger.info( - "Uvicorn running on unix socket %s (Press CTRL+C to quit)", config.uds - ) - - else: - addr_format = "%s://%s:%d" - host = "0.0.0.0" if config.host is None else config.host - if ":" in host: - # It's an IPv6 address. - addr_format = "%s://[%s]:%d" - - port = config.port - if port == 0: - port = listeners[0].getsockname()[1] - - protocol_name = "https" if config.ssl else "http" - message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)" - color_message = ( - "Uvicorn running on " - + click.style(addr_format, bold=True) - + " (Press CTRL+C to quit)" - ) - logger.info( - message, - protocol_name, - host, - port, - extra={"color_message": color_message}, - ) - - async def main_loop(self) -> None: - counter = 0 - should_exit = await self.on_tick(counter) - while not should_exit: - counter += 1 - counter = counter % 864000 - await asyncio.sleep(0.1) - should_exit = await self.on_tick(counter) - - async def on_tick(self, counter: int) -> bool: - # Update the default headers, once per second. - if counter % 10 == 0: - current_time = time.time() - current_date = formatdate(current_time, usegmt=True).encode() - - if self.config.date_header: - date_header = [(b"date", current_date)] - else: - date_header = [] - - self.server_state.default_headers = ( - date_header + self.config.encoded_headers - ) - - # Callback to `callback_notify` once every `timeout_notify` seconds. - if self.config.callback_notify is not None: - if current_time - self.last_notified > self.config.timeout_notify: - self.last_notified = current_time - await self.config.callback_notify() - - # Determine if we should exit. - if self.should_exit: - return True - if self.config.limit_max_requests is not None: - return self.server_state.total_requests >= self.config.limit_max_requests - return False - - async def shutdown(self, sockets: Optional[List[socket.socket]] = None) -> None: - logger.info("Shutting down") - - # Stop accepting new connections. - for server in self.servers: - server.close() - for sock in sockets or []: - sock.close() - for server in self.servers: - await server.wait_closed() - - # Request shutdown on all existing connections. 
- for connection in list(self.server_state.connections): - connection.shutdown() - await asyncio.sleep(0.1) - - # When 3.10 is not supported anymore, use `async with asyncio.timeout(...):`. - try: - await asyncio.wait_for( - self._wait_tasks_to_complete(), - timeout=self.config.timeout_graceful_shutdown, - ) - except asyncio.TimeoutError: - logger.error( - "Cancel %s running task(s), timeout graceful shutdown exceeded", - len(self.server_state.tasks), - ) - for t in self.server_state.tasks: - if sys.version_info < (3, 9): # pragma: py-gte-39 - t.cancel() - else: # pragma: py-lt-39 - t.cancel(msg="Task cancelled, timeout graceful shutdown exceeded") - - # Send the lifespan shutdown event, and wait for application shutdown. - if not self.force_exit: - await self.lifespan.shutdown() - - async def _wait_tasks_to_complete(self) -> None: - # Wait for existing connections to finish sending responses. - if self.server_state.connections and not self.force_exit: - msg = "Waiting for connections to close. (CTRL+C to force quit)" - logger.info(msg) - while self.server_state.connections and not self.force_exit: - await asyncio.sleep(0.1) - - # Wait for existing tasks to complete. - if self.server_state.tasks and not self.force_exit: - msg = "Waiting for background tasks to complete. (CTRL+C to force quit)" - logger.info(msg) - while self.server_state.tasks and not self.force_exit: - await asyncio.sleep(0.1) - - def install_signal_handlers(self) -> None: - if threading.current_thread() is not threading.main_thread(): - # Signals can only be listened to from the main thread. - return - - loop = asyncio.get_event_loop() - - try: - for sig in HANDLED_SIGNALS: - loop.add_signal_handler(sig, self.handle_exit, sig, None) - except NotImplementedError: # pragma: no cover - # Windows - for sig in HANDLED_SIGNALS: - signal.signal(sig, self.handle_exit) - - def handle_exit(self, sig: int, frame: Optional[FrameType]) -> None: - if self.should_exit and sig == signal.SIGINT: - self.force_exit = True - else: - self.should_exit = True diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yaml/loader.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yaml/loader.py deleted file mode 100644 index e90c11224c38e559cdf0cb205f0692ebd4fb8681..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yaml/loader.py +++ /dev/null @@ -1,63 +0,0 @@ - -__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader'] - -from .reader import * -from .scanner import * -from .parser import * -from .composer import * -from .constructor import * -from .resolver import * - -class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - BaseConstructor.__init__(self) - BaseResolver.__init__(self) - -class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - FullConstructor.__init__(self) - Resolver.__init__(self) - -class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - SafeConstructor.__init__(self) - Resolver.__init__(self) - -class Loader(Reader, 
Scanner, Parser, Composer, Constructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - Constructor.__init__(self) - Resolver.__init__(self) - -# UnsafeLoader is the same as Loader (which is and was always unsafe on -# untrusted input). Use of either Loader or UnsafeLoader should be rare, since -# FullLoad should be able to load almost all YAML safely. Loader is left intact -# to ensure backwards compatibility. -class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver): - - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - Constructor.__init__(self) - Resolver.__init__(self) diff --git a/spaces/pycui/RealChar/realtime_ai_character/static/index.html b/spaces/pycui/RealChar/realtime_ai_character/static/index.html deleted file mode 100644 index da025fe1a3ca57aabd1d117fa8bab3b9121f233a..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/realtime_ai_character/static/index.html +++ /dev/null @@ -1,112 +0,0 @@ - - - - - RealChar. - Realtime AI Character - - - - - - - -
- Logo
- This website is best viewed on a desktop browser.
- Please switch to a desktop for the best experience.
- Mobile version is coming soon!
- If you have an iOS device, you can test our iOS beta app.
- Please wear headphone 🎧 Recording
- End call icon
- Continue call icon
- Connect Icon
- Message Icon
- Connect Icon
      - - - - - diff --git a/spaces/qinzhu/Claude100K-API/app.py b/spaces/qinzhu/Claude100K-API/app.py deleted file mode 100644 index ea1790956adab1386dc3e631e257aa654e505835..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/Claude100K-API/app.py +++ /dev/null @@ -1,55 +0,0 @@ -import gradio as gr -import requests -import json - -# Define a global variable for the conversation history -conversation_history = "" - -def chat_with_ai(user_question, api_key, model): - global conversation_history - - # Instantiate the endpoint URL - url = 'https://api.anthropic.com/v1/complete' - - # Define the headers for the HTTP request - headers = { - 'Content-Type': 'application/json', - 'X-API-Key': api_key, - } - conversation_history = "" - # Define the parameters for the request - params = { - 'prompt': f'{conversation_history}\n\nHuman: {user_question}\n\nAssistant:', - 'model': model, - 'max_tokens_to_sample': 4000, - 'stop_sequences': ['\n\nHuman:'], - 'temperature': 0.8, - 'top_p': -1, - 'metadata': {} - } - - # Convert the params dict to a JSON string - params_json = json.dumps(params) - - # Send the HTTP request to the API - response = requests.post(url, headers=headers, data=params_json) - - # Check if the request was successful - if response.status_code == 200: - # Parse the JSON response - response_json = response.json() - conversation_history += f'\n\nHuman: {user_question}\n\nAssistant: {response_json["completion"]}' - - # Return the entire conversation history - return conversation_history - else: - return f'Error: {response.status_code}' - -# Define the model options -model_options = ["claude-v1", "claude-v1-100k", "claude-v1.0", "claude-v1.2", "claude-v1.3", "claude-v1.3-100k", "claude-instant-v1", "claude-instant-v1-100k", "claude-instant-v1.0", "claude-instant-v1.1", "claude-instant-v1.1-100k"] - -iface = gr.Interface(fn=chat_with_ai, - inputs=["text", "text", gr.inputs.Dropdown(model_options)], - outputs="text", - layout="vertical") -iface.launch() diff --git a/spaces/quidiaMuxgu/Expedit-SAM/2 Girls 1 Cup 1 Finger.md b/spaces/quidiaMuxgu/Expedit-SAM/2 Girls 1 Cup 1 Finger.md deleted file mode 100644 index ce73cfe85ed7456eb1313aa395d4a6d50785630e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/2 Girls 1 Cup 1 Finger.md +++ /dev/null @@ -1,6 +0,0 @@ -

      2 girls 1 cup 1 finger


      Download >>>>> https://geags.com/2uCrBN



      - - d5da3c52bf

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Alhaudalkikahanipdfdownload WORK.md b/spaces/quidiaMuxgu/Expedit-SAM/Alhaudalkikahanipdfdownload WORK.md deleted file mode 100644 index 24849e928814d159c413338fe3ecfad5fcdf0a7a..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Alhaudalkikahanipdfdownload WORK.md +++ /dev/null @@ -1,18 +0,0 @@ -

      alhaudalkikahanipdfdownload


      Download Zip https://geags.com/2uCrxZ



      -
      -925 item. New York: Anamika, 2001. "The revival of Indian cinema is an important project, and this book goes some way in that direction, although it is not a new direction for Indian cinema, even though the original directions of Indian cinema was to recreate the classical theatre, a theater of its past, India's past. It is a beautiful edition, with an introduction by the author, a wonderful amount of notes on the filmography of each film, a very careful historical and critical analysis of each film as well as a chapter on the last 25 years, which is a landmark in the history of Indian cinema." Ken Hyder, author of Histories of the Indian Cinema and Scenes of the Indian Cinema - -[Divya Kumar]"A brilliant and sensitive study of the year that has witnessed two great moments in Indian cinema, one tragic and the other triumphant, one thought-provoking and the other mesmerising. For an encyclopedic knowledge of the revival of Indian cinema, this is a must-read."Amish Tripathi, bestselling author of THE KARMA OF OM - -[Kathryn Westcott]"Nandana Kumari's book vividly tells the story of the restoration of Indian cinema in a most entertaining way and I am sure that it will be appreciated by all lovers of Indian cinema."Devika Dhar, author of Notes on Contemporary Indian Cinema - -[P.J. Kurian]"Nandana Kumari is an in-depth and inspired expert on the subject of cinema of the East. She has a huge subject to handle and she has chosen to do so in the context of India's renaissance, bringing it down to earth with its people, past, and current. It is a high-end research which makes its publication an ambitious effort."Kalyanaraman, author of Early Cinema in South India and The Indian Cinema - -"[An] essential book that is a major contribution to a revival of interest in Indian cinema. This book is a must-read for all those interested in Indian cinema."Ranjit K. Gupta, author of Cinema in India - -"This book is a gem. It is beautifully designed, contains a wealth of information, and also takes the reader into the microcosm of Indian cinema's renaissance, the many films that have been made and the reasons behind them."Ranjit K. Gupta, author of Cinema in India - -[Amitava Sen]"Indian cinema is what it is today because of the profound cultural and 4fefd39f24
      -
      -
      -

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Cad Kas Pdf Editor 5 Crack _HOT_.md b/spaces/quidiaMuxgu/Expedit-SAM/Cad Kas Pdf Editor 5 Crack _HOT_.md deleted file mode 100644 index 4611abf2c436ebc91355c33d864d73aa4a27cd51..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Cad Kas Pdf Editor 5 Crack _HOT_.md +++ /dev/null @@ -1,8 +0,0 @@ - -

The program is fairly easy to use. It is a simple program, yet full of options and features, and it can even convert almost any video format to almost any other format. You can add effects, play music, add subtitles and a ton more. It doesn't get much easier than this, and for this price it is an absolute steal!

      -

When you run it, you will be asked to choose where to install the program. You can install it to any of your drives; it won't install itself to your hard drive by default. This is a choice you will also face with other software installations, so pay attention to what you install there or you might regret it later.

      -

      cad kas pdf editor 5 crack


      Download ····· https://geags.com/2uCqLn



      -

Once you have installed the program, you can start it. It is easy to use because it is very easy to navigate: all you have to do is create your first video. You need to choose the output format for the video and the resolution (the size you want the finished video to be). In most cases this is 640x480, which is a very small video, so it is rare for most people to need anything higher; you may even be able to choose a resolution of 320x240, which most computers can handle.

      -

What should you select? You need to pick the format you want for the finished video. A popular choice is MPEG-4, which is an excellent option because it is a very commonly used format. Since this program will not work without Adobe Flash, you need to get Adobe Flash first. Many people have been disappointed with Adobe, but give it a try this time and see how good it is. For this program you only need to get the language file, which is a smaller download, and the download manager, which will let you download the program faster. After that, you will have to get Adobe Flash, QuickTime, H.264 and QuickTime Pro, and you will need to connect to the internet to download QuickTime and Adobe Flash.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Diablo3 Save Editor 2006crack BESTED0x9020.md b/spaces/quidiaMuxgu/Expedit-SAM/Diablo3 Save Editor 2006crack BESTED0x9020.md deleted file mode 100644 index 353e50e465b08d6a5e2c87b17960c70ec755703b..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Diablo3 Save Editor 2006crack BESTED0x9020.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Diablo3 Save Editor 2006CRACKED0x9020


      Download File > https://geags.com/2uCrxJ



      - - 3cee63e6c2
      -
      -
      -

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Fisicoquimica Basica Moore Pdf Free TOP.md b/spaces/quidiaMuxgu/Expedit-SAM/Fisicoquimica Basica Moore Pdf Free TOP.md deleted file mode 100644 index a1b18f5d028b07add6040fe6bb4e1efd897fafdf..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Fisicoquimica Basica Moore Pdf Free TOP.md +++ /dev/null @@ -1,7 +0,0 @@ - -

Besides, the FreeCalc 5.0.9 crack allows you to generate a variety of tables that can be printed as a daily financial statement. In addition, you can print the list of all your accounts, the list of all your transactions and much more.

      -

When the pH was increased in the presence of urea, the swelling of the starch granules was slightly reduced. Starch granules also swell in the presence of urea, and to a greater degree in a low-pH environment (Moore and Tuschhoff, 1987). These results indicate that the pH of the aqueous solution strongly affected the swelling capacity of the granules: as the pH increased, the swelling capacity was reduced (Figure 1). These findings agree with previously reported results.

      -

      fisicoquimica basica moore pdf free


Download ---> https://geags.com/2uCqs1



      -

After the isolation process, the isolated starch was washed to remove soluble matter and impurities from the medium; this step was necessary because residual impurities may affect starch swelling. The washing step was more efficient with method 2, and samples isolated with that method had a greater swelling capacity than samples from methods 1 and 3. All samples showed similar swelling behavior: increasing the pH of the medium in the presence of urea reduced the swelling capacity of the starch granules. This result can be explained by the chemical composition of the isolated starch granules, which had a high amylopectin content. Since the swelling capacity of starch granules is related to their amylopectin content, the higher the amylopectin content, the lower the swelling capacity; the amylopectin content of the isolated starches was higher than that of the native starch (Asmara, 2010).

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/r3gm/AICoverGen/src/infer_pack/models_onnx.py b/spaces/r3gm/AICoverGen/src/infer_pack/models_onnx.py deleted file mode 100644 index b945eac8e59aac38fbd166da49eda01e2b8f4bd4..0000000000000000000000000000000000000000 --- a/spaces/r3gm/AICoverGen/src/infer_pack/models_onnx.py +++ /dev/null @@ -1,818 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class 
ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in 
range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - 
"""SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in 
range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if self.gin_channels == 256: - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - 
else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - 
padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/r3gm/RVC_HF/demucs/raw.py b/spaces/r3gm/RVC_HF/demucs/raw.py deleted file mode 100644 index d4941ad2d7ed858f490db441f5b46b12bd61ad78..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/demucs/raw.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os -from collections import defaultdict, namedtuple -from pathlib import Path - -import musdb -import numpy as np -import torch as th -import tqdm -from torch.utils.data import DataLoader - -from .audio import AudioFile - -ChunkInfo = namedtuple("ChunkInfo", ["file_index", "offset", "local_index"]) - - -class Rawset: - """ - Dataset of raw, normalized, float32 audio files - """ - def __init__(self, path, samples=None, stride=None, channels=2, streams=None): - self.path = Path(path) - self.channels = channels - self.samples = samples - if stride is None: - stride = samples if samples is not None else 0 - self.stride = stride - entries = defaultdict(list) - for root, folders, files in os.walk(self.path, followlinks=True): - folders.sort() - files.sort() - for file in files: - if file.endswith(".raw"): - path = Path(root) / file - name, stream = path.stem.rsplit('.', 1) - entries[(path.parent.relative_to(self.path), name)].append(int(stream)) - - self._entries = list(entries.keys()) - - sizes = [] - self._lengths = [] - ref_streams = sorted(entries[self._entries[0]]) - assert ref_streams == list(range(len(ref_streams))) - if streams is None: - self.streams = ref_streams - else: - self.streams = streams - for entry in sorted(entries.keys()): - streams = entries[entry] - assert sorted(streams) == ref_streams - file = self._path(*entry) - length = file.stat().st_size // (4 * channels) - if samples is None: - sizes.append(1) - else: - if length < samples: - self._entries.remove(entry) - continue - sizes.append((length - samples) // stride + 1) - self._lengths.append(length) - if not sizes: - raise ValueError(f"Empty dataset {self.path}") - self._cumulative_sizes = np.cumsum(sizes) - self._sizes = sizes - - def __len__(self): - return self._cumulative_sizes[-1] - - @property - def total_length(self): - return sum(self._lengths) - - def chunk_info(self, index): - file_index = np.searchsorted(self._cumulative_sizes, index, side='right') - if file_index == 0: - local_index = index - else: - local_index = index - self._cumulative_sizes[file_index - 1] - return ChunkInfo(offset=local_index * self.stride, - file_index=file_index, - local_index=local_index) - - def _path(self, folder, name, stream=0): - return self.path / 
folder / (name + f'.{stream}.raw') - - def __getitem__(self, index): - chunk = self.chunk_info(index) - entry = self._entries[chunk.file_index] - - length = self.samples or self._lengths[chunk.file_index] - streams = [] - to_read = length * self.channels * 4 - for stream_index, stream in enumerate(self.streams): - offset = chunk.offset * 4 * self.channels - file = open(self._path(*entry, stream=stream), 'rb') - file.seek(offset) - content = file.read(to_read) - assert len(content) == to_read - content = np.frombuffer(content, dtype=np.float32) - content = content.copy() # make writable - streams.append(th.from_numpy(content).view(length, self.channels).t()) - return th.stack(streams, dim=0) - - def name(self, index): - chunk = self.chunk_info(index) - folder, name = self._entries[chunk.file_index] - return folder / name - - -class MusDBSet: - def __init__(self, mus, streams=slice(None), samplerate=44100, channels=2): - self.mus = mus - self.streams = streams - self.samplerate = samplerate - self.channels = channels - - def __len__(self): - return len(self.mus.tracks) - - def __getitem__(self, index): - track = self.mus.tracks[index] - return (track.name, AudioFile(track.path).read(channels=self.channels, - seek_time=0, - streams=self.streams, - samplerate=self.samplerate)) - - -def build_raw(mus, destination, normalize, workers, samplerate, channels): - destination.mkdir(parents=True, exist_ok=True) - loader = DataLoader(MusDBSet(mus, channels=channels, samplerate=samplerate), - batch_size=1, - num_workers=workers, - collate_fn=lambda x: x[0]) - for name, streams in tqdm.tqdm(loader): - if normalize: - ref = streams[0].mean(dim=0) # use mono mixture as reference - streams = (streams - ref.mean()) / ref.std() - for index, stream in enumerate(streams): - open(destination / (name + f'.{index}.raw'), "wb").write(stream.t().numpy().tobytes()) - - -def main(): - parser = argparse.ArgumentParser('rawset') - parser.add_argument('--workers', type=int, default=10) - parser.add_argument('--samplerate', type=int, default=44100) - parser.add_argument('--channels', type=int, default=2) - parser.add_argument('musdb', type=Path) - parser.add_argument('destination', type=Path) - - args = parser.parse_args() - - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="train"), - args.destination / "train", - normalize=True, - channels=args.channels, - samplerate=args.samplerate, - workers=args.workers) - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="valid"), - args.destination / "valid", - normalize=True, - samplerate=args.samplerate, - channels=args.channels, - workers=args.workers) - - -if __name__ == "__main__": - main() diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/loaders/transforms.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/loaders/transforms.py deleted file mode 100644 index 6371657516cd766c363fd7ea51bf978a65c70376..0000000000000000000000000000000000000000 --- a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/loaders/transforms.py +++ /dev/null @@ -1,84 +0,0 @@ -import cv2 -import numpy as np -import torch - -from spiga.data.loaders.augmentors.modern_posit import PositPose -from spiga.data.loaders.augmentors.heatmaps import Heatmaps -from spiga.data.loaders.augmentors.boundary import AddBoundary -from spiga.data.loaders.augmentors.landmarks import HorizontalFlipAug, RSTAug, OcclusionAug, \ - LightingAug, BlurAug, TargetCropAug - - -def get_transformers(data_config): - - # Data 
augmentation - aug_names = data_config.aug_names - augmentors = [] - - if 'flip' in aug_names: - augmentors.append(HorizontalFlipAug(data_config.database.ldm_flip_order, data_config.hflip_prob)) - if 'rotate_scale' in aug_names: - augmentors.append(RSTAug(data_config.angle_range, data_config.scale_min, - data_config.scale_max, data_config.trl_ratio)) - if 'occlusion' in aug_names: - augmentors.append(OcclusionAug(data_config.occluded_min_len, - data_config.occluded_max_len, - data_config.database.num_landmarks)) - if 'lighting' in aug_names: - augmentors.append(LightingAug(data_config.hsv_range_min, data_config.hsv_range_max)) - if 'blur' in aug_names: - augmentors.append(BlurAug(data_config.blur_prob, data_config.blur_kernel_range)) - - # Crop mandatory - augmentors.append(TargetCropAug(data_config.image_size, data_config.ftmap_size, data_config.target_dist)) - # Opencv style - augmentors.append(ToOpencv()) - - # Gaussian heatmaps - if 'heatmaps2D' in aug_names: - augmentors.append(Heatmaps(data_config.database.num_landmarks, data_config.ftmap_size, - data_config.sigma2D, norm=data_config.heatmap2D_norm)) - - if 'boundaries' in aug_names: - augmentors.append(AddBoundary(num_landmarks=data_config.database.num_landmarks, - map_size=data_config.ftmap_size, - sigma=data_config.sigmaBD)) - # Pose generator - if data_config.generate_pose: - augmentors.append(PositPose(data_config.database.ldm_ids, - focal_ratio=data_config.focal_ratio, - selected_ids=data_config.posit_ids, - max_iter=data_config.posit_max_iter)) - - return augmentors - - -class ToOpencv: - def __call__(self, sample): - # Convert in a numpy array and change to GBR - image = np.array(sample['image']) - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - sample['image'] = image - return sample - - -class TargetCrop(TargetCropAug): - def __init__(self, crop_size=256, target_dist=1.6): - super(TargetCrop, self).__init__(crop_size, crop_size, target_dist) - - -class AddModel3D(PositPose): - def __init__(self, ldm_ids, ftmap_size=(256, 256), focal_ratio=1.5, totensor=False): - super(AddModel3D, self).__init__(ldm_ids, focal_ratio=focal_ratio) - img_bbox = [0, 0, ftmap_size[1], ftmap_size[0]] # Shapes given are inverted (y,x) - self.cam_matrix = self._camera_matrix(img_bbox) - - if totensor: - self.cam_matrix = torch.tensor(self.cam_matrix, dtype=torch.float) - self.model3d_world = torch.tensor(self.model3d_world, dtype=torch.float) - - def __call__(self, sample={}): - # Save intrinsic matrix and 3D model landmarks - sample['cam_matrix'] = self.cam_matrix - sample['model3d'] = self.model3d_world - return sample diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Adobe Flash Player For Mac Ppapi.md b/spaces/raedeXanto/academic-chatgpt-beta/Adobe Flash Player For Mac Ppapi.md deleted file mode 100644 index 71102a25bc094f5df690e3c03b6ad99b6a391150..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Adobe Flash Player For Mac Ppapi.md +++ /dev/null @@ -1,23 +0,0 @@ -
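The deleted SPIGA `transforms.py` above assembles its augmentation pipeline as a plain list of callables, each of which accepts a `sample` dict and returns it modified (see `ToOpencv.__call__`). The loader that consumes this list is not part of this diff; a minimal sketch of the usual pattern, assuming the transforms are simply chained in order, is:

```python
def apply_transforms(sample: dict, transformers: list) -> dict:
    """Chain SPIGA-style augmentors: each callable updates and returns the sample dict."""
    for transform in transformers:
        sample = transform(sample)
    return sample


# Hypothetical usage (data_config and the sample fields are assumptions, not taken from this diff):
# transformers = get_transformers(data_config)
# sample = {"image": pil_image}  # plus whatever landmark/bbox keys the augmentors expect
# sample = apply_transforms(sample, transformers)
```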
      -

      How to Install Adobe Flash Player for Mac PPAPI

      -

      Adobe Flash Player is a software that allows you to view and interact with multimedia content on the web, such as animations, games, videos, and more. However, Adobe has discontinued support for Flash Player since December 2020 and blocked Flash content from running in Flash Player since January 2021. This means that you can no longer use Flash Player on most web browsers, such as Safari, Chrome, Firefox, and Edge.

      -

      Adobe Flash Player For Mac Ppapi


DOWNLOAD ---> https://tinourl.com/2uL0Kj



      -

      However, there is still a way to use Flash Player on some Chromium-based browsers, such as Opera and Brave. These browsers use a different plugin called PPAPI (Pepper Plugin API), which is not affected by Adobe's decision to end Flash support. PPAPI is a more secure and stable plugin that runs in a separate process from the browser. To use Flash Player on these browsers, you need to install Adobe Flash Player for Mac PPAPI.

      -

      Here are the steps to install Adobe Flash Player for Mac PPAPI:

      -
        -
      1. Download the latest version of Adobe Flash Player for Mac PPAPI from this link. The file name should be "flashplayer32pp_xa_install.dmg".
      2. -
      3. Open the downloaded file and double-click on the installer icon.
      4. -
      5. Follow the instructions on the screen to complete the installation.
      6. -
      7. Restart your browser and go to this page to check if Flash Player is working.
      8. -
      9. If you see a message that says "Flash Player is blocked", click on the lock icon next to the address bar and allow Flash content on the site.
      10. -
      11. If you see a message that says "Click to enable Adobe Flash Player", click on it and allow Flash content on the site.
      12. -
      13. If you see an animation or video playing, congratulations! You have successfully installed Adobe Flash Player for Mac PPAPI.
      14. -
      -

      Note: Adobe Flash Player for Mac PPAPI is only compatible with Chromium-based browsers that support PPAPI. It will not work on other browsers that use NPAPI (Netscape Plugin API) or ActiveX. Also, be aware that using Flash Player may expose you to security risks and performance issues, as Adobe no longer provides updates or patches for it. Use it at your own risk and discretion.

      - -

      Why would you want to use Flash Player on your Mac? Flash Player was once a popular and widely used technology for delivering rich media content on the web. Many websites and applications relied on Flash Player to display animations, games, videos, and more. However, Flash Player also had many drawbacks, such as security vulnerabilities, compatibility issues, performance problems, and high battery consumption. As a result, many web developers and users have switched to alternative technologies, such as HTML5, WebGL, and WebAssembly, which are more secure, stable, and efficient.

      -

      However, there may still be some cases where you need to use Flash Player on your Mac. For example, you may want to access some old websites or applications that have not been updated to use modern technologies. Or you may want to play some nostalgic Flash games that you used to enjoy. Or you may want to view some educational or artistic content that was created with Flash. In these situations, you can use Adobe Flash Player for Mac PPAPI to run Flash content on your Chromium-based browser.

      -

      -

      However, you should also be aware of the risks and limitations of using Flash Player on your Mac. As Adobe has stopped supporting Flash Player, you will not receive any updates or patches for it. This means that you may encounter security threats, bugs, or errors when using Flash Player. Also, some websites or applications may not work properly or at all with Flash Player. Moreover, Flash Player may slow down your browser or drain your battery faster. Therefore, you should only use Flash Player when necessary and disable it when not in use.

      cec2833e83
      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Call Of Duty 1 Mp !!HOT!! Crack.md b/spaces/raedeXanto/academic-chatgpt-beta/Call Of Duty 1 Mp !!HOT!! Crack.md deleted file mode 100644 index 4088b4f7323de53aba8fa12f8744d0cdad42843a..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Call Of Duty 1 Mp !!HOT!! Crack.md +++ /dev/null @@ -1,42 +0,0 @@ - -

      How to Play Call of Duty 1 Multiplayer with a Cracked Version

      - -

      Call of Duty 1 is a classic first-person shooter game that was released in 2003. It has a single-player campaign mode and a multiplayer mode where you can compete with other players online. However, if you don't have the original CD or a Steam account, you might wonder how to play Call of Duty 1 multiplayer with a cracked version.

      -

      Call Of Duty 1 Mp Crack


      Download File ---> https://tinourl.com/2uL16s



      - -

      In this article, we will show you how to download and install Call of Duty 1 multiplayer crack, as well as some tips and tricks to enjoy the game without any problems.

      - -

      What is Call of Duty 1 Mp Crack?

      - -

      Call of Duty 1 Mp Crack is a modified version of the game that allows you to play multiplayer mode without having to authenticate your copy with the official servers. This means that you can play online with other players who have the same crack, or on private servers that don't require authentication.

      - -

      However, playing with a cracked version also has some drawbacks. For example, you won't be able to access the official servers or join games hosted by players who have the original version. You might also encounter some bugs, glitches, or compatibility issues with different versions of the game. Moreover, using a cracked version might be illegal in some countries, so you should be careful and check your local laws before downloading and installing it.

      - -

      How to Download and Install Call of Duty 1 Mp Crack?

      - -

      There are many sources where you can download Call of Duty 1 Mp Crack, but not all of them are reliable or safe. Some might contain viruses, malware, or unwanted programs that can harm your computer or steal your personal information. Therefore, you should always scan any file you download with an antivirus software before opening it.

      - -

      One of the most popular and trusted sources for Call of Duty 1 Mp Crack is MegaGames[^1^], which is a website that provides game fixes, trainers, mods, and cheats for various games. You can find the link to their website in the references section below.

      - -

      To download and install Call of Duty 1 Mp Crack from MegaGames, follow these steps:

      -

      - -
        -
      1. Go to their website and search for "Call of Duty v1.3 ENG".
      2. -
      3. Click on the first result and then click on "Download Now".
      4. -
      5. Save the file to your computer and extract it with a program like WinRAR or 7-Zip.
      6. -
      7. Copy the file "CoDSP.exe" from the extracted folder and paste it into your Call of Duty 1 installation folder (usually C:\Program Files\Call of Duty).
      8. -
      9. Replace the original file when prompted.
      10. -
      11. Launch the game from "CoDSP.exe" and enjoy multiplayer mode.
      12. -
      - -

      Tips and Tricks for Playing Call of Duty 1 Multiplayer with a Cracked Version

      - -

      Now that you have installed Call of Duty 1 Mp Crack, you might want to know how to find and join multiplayer games. Here are some tips and tricks that can help you:

      - -
        -
      • To find multiplayer servers that support cracked versions, you can use websites like GameTracker[^2^] or Game-Monitor[^3^], which list hundreds of servers for various games. You can filter them by game type, map, ping, players, etc. You can also add them to your favorites list for easy access.
      • -
      • To join a multiplayer server, you need to launch the game from "CoDSP.exe" and go to "Multiplayer" -> "Join Server". Then, enter the IP address and port number of the server you want to join (for example, 123.456.789.0:28960). Alternatively, you can use the console command "/connect IP:port" (without quotation marks) to join a server directly.
      • -
      • To create your own multiplayer server, you need to launch the game from "CoDSP.exe" and go to "Multiplayer" -> "Start New Server". Then, choose your game type, map, rules, etc. You can also use console commands to customize your server settings. To make your server visible to other players online, you need to forward your port number (usually

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bit Che Guevara 20 35 Crack [WORK].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bit Che Guevara 20 35 Crack [WORK].md deleted file mode 100644 index 4cacdeba98904202485832ee40f9bb280658cbf3..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bit Che Guevara 20 35 Crack [WORK].md +++ /dev/null @@ -1,14 +0,0 @@ -

        Bit Che Guevara 20 35 Crack


        Download File ---> https://urlgoal.com/2uCKVS



        -
        -Oct 8, 1987 - Ernesto 'Che' Guevara: Rebel Against Soviet Political Economy. sugar.20 Between 1929 and 1932 the value of Cuban sugar in pesos.... From the 1930s through the 1970s, the value of sugar in the peso was -31 Aug. 2017 г. - Ernest Guevara was a Cuban revolutionary and fighter for the independence of Latin America. -His name has become synonymous with courage and -11 Feb. -2014 г. - ... and "Che Guevara." -And even earlier, in 1926, Guevara's book "Bolivian Diary" was published in Chile. -In the book Ernesto ...Duration: 1:41 Published: 11 Feb. -2014 г -19 Nov. 2016. - Since the 1950s, Che Guevara was for several years a key figure ... 8a78ff9644
        -
        -
        -

        diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Canon Service Support Tool Sst Software V4.11.rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Canon Service Support Tool Sst Software V4.11.rar.md deleted file mode 100644 index a1c556f92ee21cc5988833c5f948bf0d2a13428f..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Canon Service Support Tool Sst Software V4.11.rar.md +++ /dev/null @@ -1,8 +0,0 @@ -

        canon service support tool sst software v4.11.rar


        Download >>> https://urlgoal.com/2uCJJS



- -Dec 27, 2018 - Canon service support tool SST software v4.11 - Download Utilities. Hi all. Want to know what's new in Service Support Tool V.4.74Em? Check it out. -Canon Service Tool V.4005, V.3999, V.3900, V.3998, V.3969, V.3970, V.3990, V.3980 - Service Tool V.4005 for HP LaserJet P1005 and P1006. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/reddysh/pleasework/README.md b/spaces/reddysh/pleasework/README.md deleted file mode 100644 index 40397330d02a1476d441d296668c44559448f4e1..0000000000000000000000000000000000000000 --- a/spaces/reddysh/pleasework/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Pleasework -emoji: 💻 -colorFrom: indigo -colorTo: blue -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rgres/Seg2Sat/frontend/build/_app/immutable/assets/pages/__layout.svelte-b67cf61d.css b/spaces/rgres/Seg2Sat/frontend/build/_app/immutable/assets/pages/__layout.svelte-b67cf61d.css deleted file mode 100644 index 11acbae8955923742bb73fadde38166957d5d33b..0000000000000000000000000000000000000000 --- a/spaces/rgres/Seg2Sat/frontend/build/_app/immutable/assets/pages/__layout.svelte-b67cf61d.css +++ /dev/null @@ -1 +0,0 @@ -@import"https://fonts.googleapis.com/css2?family=Open+Sans:wght@100;200;300;400;500;600;700;800&display=swap";*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji"}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}html{font-family:Open Sans,sans-serif}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: 
proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::-webkit-backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.prose{color:var(--tw-prose-body);max-width:65ch}.prose :where([class~="lead"]):not(:where([class~="not-prose"] *)){color:var(--tw-prose-lead);font-size:1.25em;line-height:1.6;margin-top:1.2em;margin-bottom:1.2em}.prose :where(a):not(:where([class~="not-prose"] *)){color:var(--tw-prose-links);text-decoration:underline;font-weight:500}.prose :where(strong):not(:where([class~="not-prose"] *)){color:var(--tw-prose-bold);font-weight:600}.prose :where(ol):not(:where([class~="not-prose"] *)){list-style-type:decimal;padding-left:1.625em}.prose :where(ol[type="A"]):not(:where([class~="not-prose"] *)){list-style-type:upper-alpha}.prose :where(ol[type="a"]):not(:where([class~="not-prose"] *)){list-style-type:lower-alpha}.prose :where(ol[type="A" s]):not(:where([class~="not-prose"] *)){list-style-type:upper-alpha}.prose :where(ol[type="a" s]):not(:where([class~="not-prose"] *)){list-style-type:lower-alpha}.prose 
:where(ol[type="I"]):not(:where([class~="not-prose"] *)){list-style-type:upper-roman}.prose :where(ol[type="i"]):not(:where([class~="not-prose"] *)){list-style-type:lower-roman}.prose :where(ol[type="I" s]):not(:where([class~="not-prose"] *)){list-style-type:upper-roman}.prose :where(ol[type="i" s]):not(:where([class~="not-prose"] *)){list-style-type:lower-roman}.prose :where(ol[type="1"]):not(:where([class~="not-prose"] *)){list-style-type:decimal}.prose :where(ul):not(:where([class~="not-prose"] *)){list-style-type:disc;padding-left:1.625em}.prose :where(ol > li):not(:where([class~="not-prose"] *))::marker{font-weight:400;color:var(--tw-prose-counters)}.prose :where(ul > li):not(:where([class~="not-prose"] *))::marker{color:var(--tw-prose-bullets)}.prose :where(hr):not(:where([class~="not-prose"] *)){border-color:var(--tw-prose-hr);border-top-width:1px;margin-top:3em;margin-bottom:3em}.prose :where(blockquote):not(:where([class~="not-prose"] *)){font-weight:500;font-style:italic;color:var(--tw-prose-quotes);border-left-width:.25rem;border-left-color:var(--tw-prose-quote-borders);quotes:"\201c""\201d""\2018""\2019";margin-top:1.6em;margin-bottom:1.6em;padding-left:1em}.prose :where(h1):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:800;font-size:2.25em;margin-top:0;margin-bottom:.8888889em;line-height:1.1111111}.prose :where(h1 strong):not(:where([class~="not-prose"] *)){font-weight:900}.prose :where(h2):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:700;font-size:1.5em;margin-top:2em;margin-bottom:1em;line-height:1.3333333}.prose :where(h2 strong):not(:where([class~="not-prose"] *)){font-weight:800}.prose :where(h3):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:600;font-size:1.25em;margin-top:1.6em;margin-bottom:.6em;line-height:1.6}.prose :where(h3 strong):not(:where([class~="not-prose"] *)){font-weight:700}.prose :where(h4):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:600;margin-top:1.5em;margin-bottom:.5em;line-height:1.5}.prose :where(h4 strong):not(:where([class~="not-prose"] *)){font-weight:700}.prose :where(figure > *):not(:where([class~="not-prose"] *)){margin-top:0;margin-bottom:0}.prose :where(figcaption):not(:where([class~="not-prose"] *)){color:var(--tw-prose-captions);font-size:.875em;line-height:1.4285714;margin-top:.8571429em}.prose :where(a code):not(:where([class~="not-prose"] *)){color:var(--tw-prose-links)}.prose :where(pre code):not(:where([class~="not-prose"] *)):before{content:none}.prose :where(pre code):not(:where([class~="not-prose"] *)):after{content:none}.prose :where(table):not(:where([class~="not-prose"] *)){width:100%;table-layout:auto;text-align:left;margin-top:2em;margin-bottom:2em;font-size:.875em;line-height:1.7142857}.prose :where(thead):not(:where([class~="not-prose"] *)){border-bottom-width:1px;border-bottom-color:var(--tw-prose-th-borders)}.prose :where(thead th):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:600;vertical-align:bottom;padding-right:.5714286em;padding-bottom:.5714286em;padding-left:.5714286em}.prose :where(tbody tr):not(:where([class~="not-prose"] *)){border-bottom-width:1px;border-bottom-color:var(--tw-prose-td-borders)}.prose :where(tbody tr:last-child):not(:where([class~="not-prose"] *)){border-bottom-width:0}.prose :where(tbody td):not(:where([class~="not-prose"] *)){vertical-align:baseline;padding:.5714286em}.prose{--tw-prose-body: #374151;--tw-prose-headings: 
#111827;--tw-prose-lead: #4b5563;--tw-prose-links: #111827;--tw-prose-bold: #111827;--tw-prose-counters: #6b7280;--tw-prose-bullets: #d1d5db;--tw-prose-hr: #e5e7eb;--tw-prose-quotes: #111827;--tw-prose-quote-borders: #e5e7eb;--tw-prose-captions: #6b7280;--tw-prose-code: #111827;--tw-prose-pre-code: #e5e7eb;--tw-prose-pre-bg: #1f2937;--tw-prose-th-borders: #d1d5db;--tw-prose-td-borders: #e5e7eb;--tw-prose-invert-body: #d1d5db;--tw-prose-invert-headings: #fff;--tw-prose-invert-lead: #9ca3af;--tw-prose-invert-links: #fff;--tw-prose-invert-bold: #fff;--tw-prose-invert-counters: #9ca3af;--tw-prose-invert-bullets: #4b5563;--tw-prose-invert-hr: #374151;--tw-prose-invert-quotes: #f3f4f6;--tw-prose-invert-quote-borders: #374151;--tw-prose-invert-captions: #9ca3af;--tw-prose-invert-code: #fff;--tw-prose-invert-pre-code: #d1d5db;--tw-prose-invert-pre-bg: rgb(0 0 0 / 50%);--tw-prose-invert-th-borders: #4b5563;--tw-prose-invert-td-borders: #374151;font-size:1rem;line-height:1.75}.prose :where(p):not(:where([class~="not-prose"] *)){margin-top:1.25em;margin-bottom:1.25em}.prose :where(img):not(:where([class~="not-prose"] *)){margin-top:2em;margin-bottom:2em}.prose :where(video):not(:where([class~="not-prose"] *)){margin-top:2em;margin-bottom:2em}.prose :where(figure):not(:where([class~="not-prose"] *)){margin-top:2em;margin-bottom:2em}.prose :where(h2 code):not(:where([class~="not-prose"] *)){font-size:.875em}.prose :where(h3 code):not(:where([class~="not-prose"] *)){font-size:.9em}.prose :where(li):not(:where([class~="not-prose"] *)){margin-top:.5em;margin-bottom:.5em}.prose :where(ol > li):not(:where([class~="not-prose"] *)){padding-left:.375em}.prose :where(ul > li):not(:where([class~="not-prose"] *)){padding-left:.375em}.prose>:where(ul > li p):not(:where([class~="not-prose"] *)){margin-top:.75em;margin-bottom:.75em}.prose>:where(ul > li > *:first-child):not(:where([class~="not-prose"] *)){margin-top:1.25em}.prose>:where(ul > li > *:last-child):not(:where([class~="not-prose"] *)){margin-bottom:1.25em}.prose>:where(ol > li > *:first-child):not(:where([class~="not-prose"] *)){margin-top:1.25em}.prose>:where(ol > li > *:last-child):not(:where([class~="not-prose"] *)){margin-bottom:1.25em}.prose :where(ul ul,ul ol,ol ul,ol ol):not(:where([class~="not-prose"] *)){margin-top:.75em;margin-bottom:.75em}.prose :where(hr + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(h2 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(h3 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(h4 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(thead th:first-child):not(:where([class~="not-prose"] *)){padding-left:0}.prose :where(thead th:last-child):not(:where([class~="not-prose"] *)){padding-right:0}.prose :where(tbody td:first-child):not(:where([class~="not-prose"] *)){padding-left:0}.prose :where(tbody td:last-child):not(:where([class~="not-prose"] *)){padding-right:0}.prose>:where(:first-child):not(:where([class~="not-prose"] *)){margin-top:0}.prose>:where(:last-child):not(:where([class~="not-prose"] 
*)){margin-bottom:0}.pointer-events-none{pointer-events:none}.absolute{position:absolute}.relative{position:relative}.bottom-0{bottom:0px}.left-0{left:0px}.top-0{top:0px}.right-0{right:0px}.z-0{z-index:0}.z-10{z-index:10}.z-20{z-index:20}.my-3{margin-top:.75rem;margin-bottom:.75rem}.my-6{margin-top:1.5rem;margin-bottom:1.5rem}.mx-auto{margin-left:auto;margin-right:auto}.-mx-3{margin-left:-.75rem;margin-right:-.75rem}.mt-6{margin-top:1.5rem}.mb-2{margin-bottom:.5rem}.box-border{box-sizing:border-box}.block{display:block}.flex{display:flex}.grid{display:grid}.hidden{display:none}.aspect-\[512\/512\]{aspect-ratio:512/512}.h-0{height:0px}.h-full{height:100%}.max-h-\[9rem\]{max-height:9rem}.max-h-24{max-height:6rem}.w-0{width:0px}.w-full{width:100%}.max-w-full{max-width:100%}.max-w-\[3rem\]{max-width:3rem}.max-w-screen-md{max-width:768px}.-translate-x-1\/2{--tw-translate-x: -50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}@-webkit-keyframes spin{to{transform:rotate(360deg)}}@keyframes spin{to{transform:rotate(360deg)}}.animate-spin{-webkit-animation:spin 1s linear infinite;animation:spin 1s linear infinite}.cursor-pointer{cursor:pointer}.snap-x{scroll-snap-type:x var(--tw-scroll-snap-strictness)}.snap-y{scroll-snap-type:y var(--tw-scroll-snap-strictness)}.snap-mandatory{--tw-scroll-snap-strictness: mandatory}.snap-start{scroll-snap-align:start}.snap-always{scroll-snap-stop:always}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.grid-cols-\[2fr_1\.5fr\]{grid-template-columns:2fr 1.5fr}.flex-col{flex-direction:column}.flex-nowrap{flex-wrap:nowrap}.items-center{align-items:center}.justify-center{justify-content:center}.gap-2{gap:.5rem}.gap-1{gap:.25rem}.overflow-hidden{overflow:hidden}.overflow-clip{overflow:clip}.overflow-scroll{overflow:scroll}.overflow-x-scroll{overflow-x:scroll}.whitespace-nowrap{white-space:nowrap}.rounded-lg{border-radius:.5rem}.border{border-width:1px}.border-gray-500{--tw-border-opacity: 1;border-color:rgb(107 114 128 / var(--tw-border-opacity))}.border-gray-300{--tw-border-opacity: 1;border-color:rgb(209 213 219 / var(--tw-border-opacity))}.bg-gray-50{--tw-bg-opacity: 1;background-color:rgb(249 250 251 / var(--tw-bg-opacity))}.p-3{padding:.75rem}.p-1{padding:.25rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.py-5{padding-top:1.25rem;padding-bottom:1.25rem}.py-3{padding-top:.75rem;padding-bottom:.75rem}.pl-2{padding-left:.5rem}.text-base{font-size:1rem;line-height:1.5rem}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xs{font-size:.75rem;line-height:1rem}.font-bold{font-weight:700}.leading-6{line-height:1.5rem}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}.text-gray-900{--tw-text-opacity: 1;color:rgb(17 24 39 / var(--tw-text-opacity))}.opacity-0{opacity:0}.opacity-30{opacity:.3}.outline{outline-style:solid}.outline-2{outline-width:2px}.outline-offset-\[-2px\]{outline-offset:-2px}.transition-all{transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.duration-200{transition-duration:.2s}.ease-in-out{transition-timing-function:cubic-bezier(.4,0,.2,1)}.hover\:outline:hover{outline-style:solid}.focus\:border-blue-500:focus{--tw-border-opacity: 1;border-color:rgb(59 130 246 / var(--tw-border-opacity))}.focus\:ring-blue-500:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(59 130 246 / 
var(--tw-ring-opacity))}.disabled\:opacity-50:disabled{opacity:.5}@media (prefers-color-scheme: dark){.dark\:border-gray-300{--tw-border-opacity: 1;border-color:rgb(209 213 219 / var(--tw-border-opacity))}.dark\:bg-gray-50{--tw-bg-opacity: 1;background-color:rgb(249 250 251 / var(--tw-bg-opacity))}.dark\:focus\:ring-blue-500:focus{--tw-ring-opacity: 1;--tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity))}}@media (min-width: 530px){.sm\:max-h-\[none\]{max-height:none}.sm\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}} diff --git a/spaces/riccorl/relik-entity-linking/relik/reader/relik_reader_core.py b/spaces/riccorl/relik-entity-linking/relik/reader/relik_reader_core.py deleted file mode 100644 index 1d62c5f13b3c1f7e7ba02209d2c88813d4f960ac..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/reader/relik_reader_core.py +++ /dev/null @@ -1,497 +0,0 @@ -import collections -from typing import Any, Dict, Iterator, List, Optional - -import torch -from transformers import AutoModel -from transformers.activations import ClippedGELUActivation, GELUActivation -from transformers.modeling_utils import PoolerEndLogits - -from relik.reader.data.relik_reader_sample import RelikReaderSample - -activation2functions = { - "relu": torch.nn.ReLU(), - "gelu": GELUActivation(), - "gelu_10": ClippedGELUActivation(-10, 10), -} - - -class RelikReaderCoreModel(torch.nn.Module): - def __init__( - self, - transformer_model: str, - additional_special_symbols: int, - num_layers: Optional[int] = None, - activation: str = "gelu", - linears_hidden_size: Optional[int] = 512, - use_last_k_layers: int = 1, - training: bool = False, - ) -> None: - super().__init__() - - # Transformer model declaration - self.transformer_model_name = transformer_model - self.transformer_model = ( - AutoModel.from_pretrained(transformer_model) - if num_layers is None - else AutoModel.from_pretrained( - transformer_model, num_hidden_layers=num_layers - ) - ) - self.transformer_model.resize_token_embeddings( - self.transformer_model.config.vocab_size + additional_special_symbols - ) - - self.activation = activation - self.linears_hidden_size = linears_hidden_size - self.use_last_k_layers = use_last_k_layers - - # named entity detection layers - self.ned_start_classifier = self._get_projection_layer( - self.activation, last_hidden=2, layer_norm=False - ) - self.ned_end_classifier = PoolerEndLogits(self.transformer_model.config) - - # END entity disambiguation layer - self.ed_start_projector = self._get_projection_layer(self.activation) - self.ed_end_projector = self._get_projection_layer(self.activation) - - self.training = training - - # criterion - self.criterion = torch.nn.CrossEntropyLoss() - - def _get_projection_layer( - self, - activation: str, - last_hidden: Optional[int] = None, - input_hidden=None, - layer_norm: bool = True, - ) -> torch.nn.Sequential: - head_components = [ - torch.nn.Dropout(0.1), - torch.nn.Linear( - self.transformer_model.config.hidden_size * self.use_last_k_layers - if input_hidden is None - else input_hidden, - self.linears_hidden_size, - ), - activation2functions[activation], - torch.nn.Dropout(0.1), - torch.nn.Linear( - self.linears_hidden_size, - self.linears_hidden_size if last_hidden is None else last_hidden, - ), - ] - - if layer_norm: - head_components.append( - torch.nn.LayerNorm( - self.linears_hidden_size if last_hidden is None else last_hidden, - 
self.transformer_model.config.layer_norm_eps, - ) - ) - - return torch.nn.Sequential(*head_components) - - def _mask_logits(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor: - mask = mask.unsqueeze(-1) - if next(self.parameters()).dtype == torch.float16: - logits = logits * (1 - mask) - 65500 * mask - else: - logits = logits * (1 - mask) - 1e30 * mask - return logits - - def _get_model_features( - self, - input_ids: torch.Tensor, - attention_mask: torch.Tensor, - token_type_ids: Optional[torch.Tensor], - ): - model_input = { - "input_ids": input_ids, - "attention_mask": attention_mask, - "output_hidden_states": self.use_last_k_layers > 1, - } - - if token_type_ids is not None: - model_input["token_type_ids"] = token_type_ids - - model_output = self.transformer_model(**model_input) - - if self.use_last_k_layers > 1: - model_features = torch.cat( - model_output[1][-self.use_last_k_layers :], dim=-1 - ) - else: - model_features = model_output[0] - - return model_features - - def compute_ned_end_logits( - self, - start_predictions, - start_labels, - model_features, - prediction_mask, - batch_size, - ) -> Optional[torch.Tensor]: - # todo: maybe when constraining on the spans, - # we should not use a prediction_mask for the end tokens. - # at least we should not during training imo - start_positions = start_labels if self.training else start_predictions - start_positions_indices = ( - torch.arange(start_positions.size(1), device=start_positions.device) - .unsqueeze(0) - .expand(batch_size, -1)[start_positions > 0] - ).to(start_positions.device) - - if len(start_positions_indices) > 0: - expanded_features = torch.cat( - [ - model_features[i].unsqueeze(0).expand(x, -1, -1) - for i, x in enumerate(torch.sum(start_positions > 0, dim=-1)) - if x > 0 - ], - dim=0, - ).to(start_positions_indices.device) - - expanded_prediction_mask = torch.cat( - [ - prediction_mask[i].unsqueeze(0).expand(x, -1) - for i, x in enumerate(torch.sum(start_positions > 0, dim=-1)) - if x > 0 - ], - dim=0, - ).to(expanded_features.device) - - end_logits = self.ned_end_classifier( - hidden_states=expanded_features, - start_positions=start_positions_indices, - p_mask=expanded_prediction_mask, - ) - - return end_logits - - return None - - def compute_classification_logits( - self, - model_features, - special_symbols_mask, - prediction_mask, - batch_size, - start_positions=None, - end_positions=None, - ) -> torch.Tensor: - if start_positions is None or end_positions is None: - start_positions = torch.zeros_like(prediction_mask) - end_positions = torch.zeros_like(prediction_mask) - - model_start_features = self.ed_start_projector(model_features) - model_end_features = self.ed_end_projector(model_features) - model_end_features[start_positions > 0] = model_end_features[end_positions > 0] - - model_ed_features = torch.cat( - [model_start_features, model_end_features], dim=-1 - ) - - # computing ed features - classes_representations = torch.sum(special_symbols_mask, dim=1)[0].item() - special_symbols_representation = model_ed_features[special_symbols_mask].view( - batch_size, classes_representations, -1 - ) - - logits = torch.bmm( - model_ed_features, - torch.permute(special_symbols_representation, (0, 2, 1)), - ) - - logits = self._mask_logits(logits, prediction_mask) - - return logits - - def forward( - self, - input_ids: torch.Tensor, - attention_mask: torch.Tensor, - token_type_ids: Optional[torch.Tensor] = None, - prediction_mask: Optional[torch.Tensor] = None, - special_symbols_mask: Optional[torch.Tensor] = None, 
- start_labels: Optional[torch.Tensor] = None, - end_labels: Optional[torch.Tensor] = None, - use_predefined_spans: bool = False, - *args, - **kwargs, - ) -> Dict[str, Any]: - batch_size, seq_len = input_ids.shape - - model_features = self._get_model_features( - input_ids, attention_mask, token_type_ids - ) - - # named entity detection if required - if use_predefined_spans: # no need to compute spans - ned_start_logits, ned_start_probabilities, ned_start_predictions = ( - None, - None, - torch.clone(start_labels) - if start_labels is not None - else torch.zeros_like(input_ids), - ) - ned_end_logits, ned_end_probabilities, ned_end_predictions = ( - None, - None, - torch.clone(end_labels) - if end_labels is not None - else torch.zeros_like(input_ids), - ) - - ned_start_predictions[ned_start_predictions > 0] = 1 - ned_end_predictions[ned_end_predictions > 0] = 1 - - else: # compute spans - # start boundary prediction - ned_start_logits = self.ned_start_classifier(model_features) - ned_start_logits = self._mask_logits(ned_start_logits, prediction_mask) - ned_start_probabilities = torch.softmax(ned_start_logits, dim=-1) - ned_start_predictions = ned_start_probabilities.argmax(dim=-1) - - # end boundary prediction - ned_start_labels = ( - torch.zeros_like(start_labels) if start_labels is not None else None - ) - - if ned_start_labels is not None: - ned_start_labels[start_labels == -100] = -100 - ned_start_labels[start_labels > 0] = 1 - - ned_end_logits = self.compute_ned_end_logits( - ned_start_predictions, - ned_start_labels, - model_features, - prediction_mask, - batch_size, - ) - - if ned_end_logits is not None: - ned_end_probabilities = torch.softmax(ned_end_logits, dim=-1) - ned_end_predictions = torch.argmax(ned_end_probabilities, dim=-1) - else: - ned_end_logits, ned_end_probabilities = None, None - ned_end_predictions = ned_start_predictions.new_zeros(batch_size) - - # flattening end predictions - # (flattening can happen only if the - # end boundaries were not predicted using the gold labels) - if not self.training: - flattened_end_predictions = torch.clone(ned_start_predictions) - flattened_end_predictions[flattened_end_predictions > 0] = 0 - - batch_start_predictions = list() - for elem_idx in range(batch_size): - batch_start_predictions.append( - torch.where(ned_start_predictions[elem_idx] > 0)[0].tolist() - ) - - # check that the total number of start predictions - # is equal to the end predictions - total_start_predictions = sum(map(len, batch_start_predictions)) - total_end_predictions = len(ned_end_predictions) - assert ( - total_start_predictions == 0 - or total_start_predictions == total_end_predictions - ), ( - f"Total number of start predictions = {total_start_predictions}. 
" - f"Total number of end predictions = {total_end_predictions}" - ) - - curr_end_pred_num = 0 - for elem_idx, bsp in enumerate(batch_start_predictions): - for sp in bsp: - ep = ned_end_predictions[curr_end_pred_num].item() - if ep < sp: - ep = sp - - # if we already set this span throw it (no overlap) - if flattened_end_predictions[elem_idx, ep] == 1: - ned_start_predictions[elem_idx, sp] = 0 - else: - flattened_end_predictions[elem_idx, ep] = 1 - - curr_end_pred_num += 1 - - ned_end_predictions = flattened_end_predictions - - start_position, end_position = ( - (start_labels, end_labels) - if self.training - else (ned_start_predictions, ned_end_predictions) - ) - - # Entity disambiguation - ed_logits = self.compute_classification_logits( - model_features, - special_symbols_mask, - prediction_mask, - batch_size, - start_position, - end_position, - ) - ed_probabilities = torch.softmax(ed_logits, dim=-1) - ed_predictions = torch.argmax(ed_probabilities, dim=-1) - - # output build - output_dict = dict( - batch_size=batch_size, - ned_start_logits=ned_start_logits, - ned_start_probabilities=ned_start_probabilities, - ned_start_predictions=ned_start_predictions, - ned_end_logits=ned_end_logits, - ned_end_probabilities=ned_end_probabilities, - ned_end_predictions=ned_end_predictions, - ed_logits=ed_logits, - ed_probabilities=ed_probabilities, - ed_predictions=ed_predictions, - ) - - # compute loss if labels - if start_labels is not None and end_labels is not None and self.training: - # named entity detection loss - - # start - if ned_start_logits is not None: - ned_start_loss = self.criterion( - ned_start_logits.view(-1, ned_start_logits.shape[-1]), - ned_start_labels.view(-1), - ) - else: - ned_start_loss = 0 - - # end - if ned_end_logits is not None: - ned_end_labels = torch.zeros_like(end_labels) - ned_end_labels[end_labels == -100] = -100 - ned_end_labels[end_labels > 0] = 1 - - ned_end_loss = self.criterion( - ned_end_logits, - ( - torch.arange( - ned_end_labels.size(1), device=ned_end_labels.device - ) - .unsqueeze(0) - .expand(batch_size, -1)[ned_end_labels > 0] - ).to(ned_end_labels.device), - ) - - else: - ned_end_loss = 0 - - # entity disambiguation loss - start_labels[ned_start_labels != 1] = -100 - ed_labels = torch.clone(start_labels) - ed_labels[end_labels > 0] = end_labels[end_labels > 0] - ed_loss = self.criterion( - ed_logits.view(-1, ed_logits.shape[-1]), - ed_labels.view(-1), - ) - - output_dict["ned_start_loss"] = ned_start_loss - output_dict["ned_end_loss"] = ned_end_loss - output_dict["ed_loss"] = ed_loss - - output_dict["loss"] = ned_start_loss + ned_end_loss + ed_loss - - return output_dict - - def batch_predict( - self, - input_ids: torch.Tensor, - attention_mask: torch.Tensor, - token_type_ids: Optional[torch.Tensor] = None, - prediction_mask: Optional[torch.Tensor] = None, - special_symbols_mask: Optional[torch.Tensor] = None, - sample: Optional[List[RelikReaderSample]] = None, - top_k: int = 5, # the amount of top-k most probable entities to predict - *args, - **kwargs, - ) -> Iterator[RelikReaderSample]: - forward_output = self.forward( - input_ids, - attention_mask, - token_type_ids, - prediction_mask, - special_symbols_mask, - ) - - ned_start_predictions = forward_output["ned_start_predictions"].cpu().numpy() - ned_end_predictions = forward_output["ned_end_predictions"].cpu().numpy() - ed_predictions = forward_output["ed_predictions"].cpu().numpy() - ed_probabilities = forward_output["ed_probabilities"].cpu().numpy() - - batch_predictable_candidates = 
kwargs["predictable_candidates"] - patch_offset = kwargs["patch_offset"] - for ts, ne_sp, ne_ep, edp, edpr, pred_cands, po in zip( - sample, - ned_start_predictions, - ned_end_predictions, - ed_predictions, - ed_probabilities, - batch_predictable_candidates, - patch_offset, - ): - ne_start_indices = [ti for ti, c in enumerate(ne_sp[1:]) if c > 0] - ne_end_indices = [ti for ti, c in enumerate(ne_ep[1:]) if c > 0] - - final_class2predicted_spans = collections.defaultdict(list) - spans2predicted_probabilities = dict() - for start_token_index, end_token_index in zip( - ne_start_indices, ne_end_indices - ): - # predicted candidate - token_class = edp[start_token_index + 1] - 1 - predicted_candidate_title = pred_cands[token_class] - final_class2predicted_spans[predicted_candidate_title].append( - [start_token_index, end_token_index] - ) - - # candidates probabilities - classes_probabilities = edpr[start_token_index + 1] - classes_probabilities_best_indices = classes_probabilities.argsort()[ - ::-1 - ] - titles_2_probs = [] - top_k = ( - min( - top_k, - len(classes_probabilities_best_indices), - ) - if top_k != -1 - else len(classes_probabilities_best_indices) - ) - for i in range(top_k): - titles_2_probs.append( - ( - pred_cands[classes_probabilities_best_indices[i] - 1], - classes_probabilities[ - classes_probabilities_best_indices[i] - ].item(), - ) - ) - spans2predicted_probabilities[ - (start_token_index, end_token_index) - ] = titles_2_probs - - if "patches" not in ts._d: - ts._d["patches"] = dict() - - ts._d["patches"][po] = dict() - sample_patch = ts._d["patches"][po] - - sample_patch["predicted_window_labels"] = final_class2predicted_spans - sample_patch["span_title_probabilities"] = spans2predicted_probabilities - - # additional info - sample_patch["predictable_candidates"] = pred_cands - - yield ts diff --git a/spaces/rizam/rakeeb_text-classification/README.md b/spaces/rizam/rakeeb_text-classification/README.md deleted file mode 100644 index e5df1e5e616c10064a603564038b94900f074713..0000000000000000000000000000000000000000 --- a/spaces/rizam/rakeeb_text-classification/README.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Text Classification/ Sentiment analysis -emoji: 📸 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false -duplicated_from: ThankGod/text-classification ---- - -[Try Demo Text classification Here](https://huggingface.co/spaces/ThankGod/text-classification) - -## Credits -- Hugging face 🤗 for hosting this demo. -- Hugging face transformer model for text classification transformer model -- Gradio for the beautiful visualization dashboards. - -## References -- https://gradio.app/ -- https://huggingface.co/ diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/detectors_resnext.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/detectors_resnext.py deleted file mode 100644 index 5e8b20a0266a9d7e37ff1d39b3a160abef565c85..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/detectors_resnext.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import math - -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from .detectors_resnet import Bottleneck as _Bottleneck -from .detectors_resnet import DetectoRS_ResNet - - -class Bottleneck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - **kwargs): - """Bottleneck block for ResNeXt. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, width, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - self.with_modulated_dcn = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if self.with_sac: - self.conv2 = build_conv_layer( - self.sac, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - elif not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - -@BACKBONES.register_module() -class DetectoRS_ResNeXt(DetectoRS_ResNet): - """ResNeXt backbone for DetectoRS. - - Args: - groups (int): The number of groups in ResNeXt. - base_width (int): The base width of ResNeXt. 
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, groups=1, base_width=4, **kwargs): - self.groups = groups - self.base_width = base_width - super(DetectoRS_ResNeXt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - return super().make_res_layer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/hdetr_wrapper.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/hdetr_wrapper.py deleted file mode 100644 index 3dd767f5f798088905b1322730197ca60ef6cceb..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/hdetr_wrapper.py +++ /dev/null @@ -1,138 +0,0 @@ -import torch -import torch.nn.functional as F -from mmcv.runner import BaseModule - -from .models import build_model -from .models.util.misc import NestedTensor, inverse_sigmoid - - -class HDetrWrapper(BaseModule): - def __init__(self, - args=None, - init_cfg=None): - super(HDetrWrapper, self).__init__(init_cfg) - model, box_postprocessor = build_model(args) - self.model = model - self.box_postprocessor = box_postprocessor - - self.model.num_queries = self.model.num_queries_one2one - self.model.transformer.two_stage_num_proposals = self.model.num_queries - self.cls_index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 84, 85, 86, 87, 88, 89, 90] - - def forward(self, - img, - img_metas): - """Forward function for training mode. - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. 
- """ - input_img_h, input_img_w = img_metas[0]["batch_input_shape"] - batch_size = img.size(0) - img_masks = img.new_ones((batch_size, input_img_h, input_img_w), - dtype=torch.bool) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]["img_shape"] - img_masks[img_id, :img_h, :img_w] = False - samples = NestedTensor(tensors=img, mask=img_masks) - features, pos = self.model.backbone(samples) - - srcs = [] - masks = [] - for l, feat in enumerate(features): - src, mask = feat.decompose() - srcs.append(self.model.input_proj[l](src)) - masks.append(mask) - assert mask is not None - if self.model.num_feature_levels > len(srcs): - _len_srcs = len(srcs) - for l in range(_len_srcs, self.model.num_feature_levels): - if l == _len_srcs: - src = self.model.input_proj[l](features[-1].tensors) - else: - src = self.model.input_proj[l](srcs[-1]) - m = samples.mask - mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to( - torch.bool - )[0] - pos_l = self.model.backbone[1](NestedTensor(src, mask)).to(src.dtype) - srcs.append(src) - masks.append(mask) - pos.append(pos_l) - - query_embeds = None - if not self.model.two_stage or self.model.mixed_selection: - query_embeds = self.model.query_embed.weight[0: self.model.num_queries, :] - - # make attn mask - """ attention mask to prevent information leakage - """ - self_attn_mask = ( - torch.zeros([self.model.num_queries, self.model.num_queries, ]).bool().to(src.device) - ) - self_attn_mask[self.model.num_queries_one2one:, 0: self.model.num_queries_one2one, ] = True - self_attn_mask[0: self.model.num_queries_one2one, self.model.num_queries_one2one:, ] = True - - ( - hs, - init_reference, - inter_references, - enc_outputs_class, - enc_outputs_coord_unact, - ) = self.model.transformer(srcs, masks, pos, query_embeds, self_attn_mask) - - outputs_classes_one2one = [] - outputs_coords_one2one = [] - outputs_classes_one2many = [] - outputs_coords_one2many = [] - for lvl in range(hs.shape[0]): - if lvl == 0: - reference = init_reference - else: - reference = inter_references[lvl - 1] - reference = inverse_sigmoid(reference) - outputs_class = self.model.class_embed[lvl](hs[lvl]) - tmp = self.model.bbox_embed[lvl](hs[lvl]) - if reference.shape[-1] == 4: - tmp += reference - else: - assert reference.shape[-1] == 2 - tmp[..., :2] += reference - outputs_coord = tmp.sigmoid() - - outputs_classes_one2one.append( - outputs_class[:, 0: self.model.num_queries_one2one] - ) - outputs_classes_one2many.append( - outputs_class[:, self.model.num_queries_one2one:] - ) - outputs_coords_one2one.append( - outputs_coord[:, 0: self.model.num_queries_one2one] - ) - outputs_coords_one2many.append(outputs_coord[:, self.model.num_queries_one2one:]) - outputs_classes_one2one = torch.stack(outputs_classes_one2one) - outputs_coords_one2one = torch.stack(outputs_coords_one2one) - - sampled_logits = outputs_classes_one2one[-1][:, :, self.cls_index] - out = { - "pred_logits": sampled_logits, - "pred_boxes": outputs_coords_one2one[-1], - } - return out - - def simple_test(self, img, img_metas, rescale=False): - # out: dict - out = self(img, img_metas) - if rescale: - ori_target_sizes = [meta_info['ori_shape'][:2] for meta_info in img_metas] - else: - ori_target_sizes = [meta_info['img_shape'][:2] for meta_info in img_metas] - ori_target_sizes = out['pred_logits'].new_tensor(ori_target_sizes, dtype=torch.int64) - # results: List[dict(scores, labels, boxes)] - results = self.box_postprocessor(out, ori_target_sizes) - return results diff --git 
a/spaces/rorallitri/biomedical-language-models/logs/Download Mac Miller Swimming Zip and Hear His Legacy.md b/spaces/rorallitri/biomedical-language-models/logs/Download Mac Miller Swimming Zip and Hear His Legacy.md deleted file mode 100644 index 12d8935c26c2a7120e2c4e14170b2cfbd1f6f4b6..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download Mac Miller Swimming Zip and Hear His Legacy.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Download Mac Miller Swimming Zip


        Download File ✸✸✸ https://tinurll.com/2uzlZo



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download Worlds Fastest Indian MKV 108036 for Free The Amazing Adventure of a Speed Record Breaker.md b/spaces/rorallitri/biomedical-language-models/logs/Download Worlds Fastest Indian MKV 108036 for Free The Amazing Adventure of a Speed Record Breaker.md deleted file mode 100644 index 728687ed35d6c0f135331c13a25266e160be34fe..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download Worlds Fastest Indian MKV 108036 for Free The Amazing Adventure of a Speed Record Breaker.md +++ /dev/null @@ -1,5 +0,0 @@ -
        -

        dwd studios covert ops pdf download
        nine stories jd salinger epub format
        clean code robert martin epub download 14
        ellie goulding lights full album download 57
        battlefield bad company 2 crack download german
        Adobe PhotoShop CS4 [Trusted Download]
        vector magic 1.15 full tutorial keygen torrent
        [Top rated] berto y yomo
        100 decisive battles from ancient times to the present pdf download
        world's fastest indian mkv 108036

        -

        world's fastest indian mkv 108036


        Download ••• https://tinurll.com/2uznMq



        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Henrique e Diego 2015 CD Download Sute 14 Bom Motivo .md b/spaces/rorallitri/biomedical-language-models/logs/Henrique e Diego 2015 CD Download Sute 14 Bom Motivo .md deleted file mode 100644 index ddf2798e5a28ee3f581323cf88042b29521cffe6..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Henrique e Diego 2015 CD Download Sute 14 Bom Motivo .md +++ /dev/null @@ -1,6 +0,0 @@ -

        henrique e diego 2015 cd download


        Download Zip » https://tinurll.com/2uznSb



        -
        - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/rosenthal/chess/chessfenbot/save_graph.py b/spaces/rosenthal/chess/chessfenbot/save_graph.py deleted file mode 100644 index bf5ee38bddc90454fa822c0222132a050b2106d3..0000000000000000000000000000000000000000 --- a/spaces/rosenthal/chess/chessfenbot/save_graph.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Generate graph.pb and graph.pbtxt - -import os -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # Ignore Tensorflow INFO debug messages -import tensorflow as tf -import numpy as np - -# Output graph to the same directory as the checkpoint. -output_graph = "saved_models/graph.pb" -output_graphtxt = ('saved_models', 'graph.pbtxt') - -# Set up a fresh session and create the model and load it from the saved checkpoint. -tf.reset_default_graph() # clear out graph. -sess = tf.Session() - -model_path='saved_models/model_10000.ckpt' - -def weight_variable(shape, name=""): - initial = tf.truncated_normal(shape, stddev=0.1) - return tf.Variable(initial, name) - -def bias_variable(shape, name=""): - initial = tf.constant(0.1, shape=shape) - return tf.Variable(initial, name) - -def conv2d(x, W): - return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') - -def max_pool_2x2(x, name=""): - return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], padding='SAME', name=name) - -x = tf.placeholder(tf.float32, [None, 32*32], 'Input') - -# First layer : 32 features -W_conv1 = weight_variable([5, 5, 1, 32], name='W1') -b_conv1 = bias_variable([32], name='B1') - -x_image = tf.reshape(x, [-1,32,32,1]) - -h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, name='Conv1') -h_pool1 = max_pool_2x2(h_conv1, name='Pool1') - -# Second convolutional layer : 64 features -W_conv2 = weight_variable([5, 5, 32, 64], name='W2') -b_conv2 = bias_variable([64], name='B2') - -h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='Conv2') -h_pool2 = max_pool_2x2(h_conv2, name='Pool2') - -# Densely connected layer : 1024 neurons, image size now 8x8 -W_fc1 = weight_variable([8 * 8 * 64, 1024], name='W3') -b_fc1 = bias_variable([1024], name='B3') - -h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*64], name='Pool3') -h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1, 'MatMult3') - -# Dropout -keep_prob = tf.placeholder("float", name='KeepProb') -h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name='Drop4') - -# Readout layer : softmax, 13 features -W_fc2 = weight_variable([1024, 13], name='W5') -b_fc2 = bias_variable([13], name='B5') - -# Probabilities -y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='probabilities') - -# Final prediction -prediction = tf.argmax(y_conv,1, name='prediction') - -# Ground truth labels if exist -y_ = tf.placeholder(tf.float32, [None, 13], name='Ytruth') -actual_answer = tf.argmax(y_,1, name='actual') - -cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv), name='CrossEntropy') - -# train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy) -train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) - -correct_prediction = tf.equal(prediction, actual_answer, name='CorrectPrediction') -accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"), name='Accuracy') - -# Add ops to save and restore all the variables. 
-saver = tf.train.Saver() - -# Restore model from checkpoint -print("\t Loading model '%s'" % model_path) -saver.restore(sess, model_path) -print("\t Model restored.") - -# Write graph in text format -tf.train.write_graph(sess.graph_def,output_graphtxt[0], output_graphtxt[1]) - -# To freeze graph then use: -# python3 -m tensorflow.python.tools.freeze_graph --input_graph graph.pbtxt --input_checkpoint=model_10000.ckpt --input_binary=false --output_graph=actual_frozen.pb --output_node_names=prediction,probabilities - -# We also save the binary-encoded graph that may or may not be frozen (TBD) below. -# We use a built-in TF helper to export variables to constants -output_graph_def = tf.graph_util.convert_variables_to_constants( - sess, # The session is used to retrieve the weights - tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes - ["prediction", "probabilities"] # The output node names are used to select the useful nodes -) - -# Finally we serialize and dump the output graph to the filesystem -with tf.gfile.GFile(output_graph, "wb") as f: - f.write(output_graph_def.SerializeToString()) -print("%d ops in the final graph." % len(output_graph_def.node)) \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Masnoon Dua In Urdu Pdf LINK Free 21.md b/spaces/scedlatioru/img-to-music/example/Masnoon Dua In Urdu Pdf LINK Free 21.md deleted file mode 100644 index 88d3fbd4bab72f830228159ec9cc6388b3bdf2e5..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Masnoon Dua In Urdu Pdf LINK Free 21.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Masnoon Dua In Urdu Pdf Free 21


        Download File 🗹 https://gohhs.com/2uEzsW



        - -Therefore, we are pleased to release our first multilingual book "MASNUN DUA" in Arabic, English and Tamil. English Department PUBLISHING POST OF ISLAM 10. 02.2015 Al-Manar magazine in Arabic in the issue of February 7, 2015 published another letter from "Islamic scholars", headed by "Sheikh Ahmad al-Khazraji and Dr. Muhammad Al-Khazraji." The authors of the article cite a number of texts and statements by “Islamic scholars” confirming that faith and Islam are obligatory for all Muslims without exception. urdu translation in pdf format. The book was translated by Hafiz. This is one of the collections dedicated to the study of the Qur'an and the Sunnah of the Prophet Muhammad (peace be upon him).It contains not only duas in Arabic with an Urdu translation, but also an interpretation on them. Contains Surah Fatiha, Surah Al-Qadr, Prayer for Protection from Hell and more. The book will help both those who are just beginning to study the Quran, and those who already know a lot about the basics set forth in the Quran. In addition, the book contains verses from the Quran in Urdu. Islamic creed. Islam and its history. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/__init__.py b/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/shgao/EditAnything/dataset_build.py b/spaces/shgao/EditAnything/dataset_build.py deleted file mode 100644 index ec0a705c1d3e5fb4cd48eb70b43e1163561d747b..0000000000000000000000000000000000000000 --- a/spaces/shgao/EditAnything/dataset_build.py +++ /dev/null @@ -1,45 +0,0 @@ -from PIL import Image -import json - -from transformers import AutoProcessor, Blip2ForConditionalGeneration -import torch -import os - -processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") -model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) - -device = "cuda" if torch.cuda.is_available() else "cpu" -model.to(device) - -def get_blip2_text(image): - inputs = processor(image, return_tensors="pt").to(device, torch.float16) - generated_ids = model.generate(**inputs, max_new_tokens=50) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() - return generated_text - - -data_path = "files" -save_path = "" - -image_names = os.listdir(data_path) -image_names = sorted(image_names) - -text_data = {} -f = open("data.txt","w") -for each in image_names: - if '.jpg' in each: - this_data = {} - this_data['target'] = each - this_data['source'] = each[:-4]+'.json' - this_image = Image.open(os.path.join(data_path, each)) - print(each) - generated_text = get_blip2_text(this_image) - this_data['prompt'] = generated_text - print(this_data) - f.write(str(this_data)+"\n") -f.close() - - - - - diff --git a/spaces/shigel/aiemo/app.py b/spaces/shigel/aiemo/app.py deleted file mode 100644 index 249abfe3f6360dbd058fe209068056b6eb38cf52..0000000000000000000000000000000000000000 --- a/spaces/shigel/aiemo/app.py +++ /dev/null @@ -1,116 +0,0 @@ -import gradio as gr -import openai -import requests -import os -import fileinput -from dotenv import load_dotenv - -title="感情診断(β)" -inputs_label="あなたが話したいことは何ですか?" -outputs_label="AIが返信をして、あなたの話したことを受け取った人がどんな気持ちなのかが分かります。" -description=""" -- 感情診断(β)を使うと、AIが相手の気持ちを推定して1分程度で返信してくれます! -- 伝えたいことがあるんだけど、相手がどんな反応になるか気になって話せない人は、感情診断(β)を使って試してみてはいかがでしょうか? -- ※入出力の文字数は最大1000文字程度までを目安に入力してください。 -""" - -article = """ -
        Duplicate Space
        -
        注意事項
        -
          -
        • 当サービスでは、2023/3/1にリリースされたOpenAI社のChatGPT APIのgpt-3.5-turboを使用しております。
        • -
        • 当サービスで生成されたコンテンツは、OpenAI が提供する人工知能によるものであり、当サービスやOpenAI がその正確性や信頼性を保証するものではありません。
        • -
        • OpenAI の利用規約に従い、データ保持しない方針です(ただし諸般の事情によっては変更する可能性はございます)。 -
        • 当サービスで生成されたコンテンツは事実確認をした上で、コンテンツ生成者およびコンテンツ利用者の責任において利用してください。
        • -
        • 当サービスでの使用により発生したいかなる損害についても、当社は一切の責任を負いません。
        • -
        • 当サービスはβ版のため、予告なくサービスを終了する場合がございます。
        • -
        -""" - -load_dotenv() -openai.api_key = os.getenv('OPENAI_API_KEY') -MODEL = "gpt-3.5-turbo" - -def get_filetext(filename, cache={}): - if filename in cache: - # キャッシュに保存されている場合は、キャッシュからファイル内容を取得する - return cache[filename] - else: - if not os.path.exists(filename): - raise ValueError(f"ファイル '{filename}' が見つかりませんでした") - with open(filename, "r") as f: - text = f.read() - # ファイル内容をキャッシュする - cache[filename] = text - return text - -class OpenAI: - - @classmethod - def chat_completion(cls, prompt, start_with=""): - constraints = get_filetext(filename = "constraints.md") - template = get_filetext(filename = "template.md") - - # ChatCompletion APIに渡すデータを定義する - data = { - "model": "gpt-3.5-turbo", - "messages": [ - {"role": "system", "content": constraints} - ,{"role": "system", "content": template} - ,{"role": "assistant", "content": "Sure!"} - ,{"role": "user", "content": prompt} - ,{"role": "assistant", "content": start_with} - ], - } - - # ChatCompletion APIを呼び出す - response = requests.post( - "https://api.openai.com/v1/chat/completions", - headers={ - "Content-Type": "application/json", - "Authorization": f"Bearer {openai.api_key}" - }, - json=data - ) - - # ChatCompletion APIから返された結果を取得する - result = response.json() - print(result) - content = result["choices"][0]["message"]["content"].strip() - return content - -class NajiminoAI: - - @classmethod - def generate_emo_prompt(cls, user_message): - template = get_filetext(filename="template.md") - prompt = f""" - {user_message} - --- - 上記を元に、下記テンプレートを埋めてください。 - --- - {template} - """ - return prompt - - @classmethod - def generate_emo(cls, user_message): - prompt = NajiminoAI.generate_emo_prompt(user_message); - start_with = "" - result = OpenAI.chat_completion(prompt=prompt, start_with=start_with) - return result - -def main(): - iface = gr.Interface(fn=NajiminoAI.generate_emo, - inputs=gr.Textbox(label=inputs_label), - outputs=gr.Textbox(label=outputs_label), - title=title, - description=description, - article=article, - allow_flagging='never' - ) - - iface.launch() - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/shriarul5273/Yolov7/models/experimental.py b/spaces/shriarul5273/Yolov7/models/experimental.py deleted file mode 100644 index 735d7aa0ebe7dbf3c4b062ebc3858cb5f9ebab40..0000000000000000000000000000000000000000 --- a/spaces/shriarul5273/Yolov7/models/experimental.py +++ /dev/null @@ -1,272 +0,0 @@ -import numpy as np -import random -import torch -import torch.nn as nn - -from models.common import Conv, DWConv -from utils.google_utils import attempt_download - - -class CrossConv(nn.Module): - # Cross Convolution Downsample - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): - # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super(CrossConv, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, (1, k), (1, s)) - self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class Sum(nn.Module): - # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 - def __init__(self, n, weight=False): # n: number of inputs - super(Sum, self).__init__() - self.weight = weight # apply weights boolean - self.iter = range(n - 1) # iter object - if weight: - self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights - - def forward(self, x): - y = x[0] # no weight - if self.weight: - w = 
torch.sigmoid(self.w) * 2 - for i in self.iter: - y = y + x[i + 1] * w[i] - else: - for i in self.iter: - y = y + x[i + 1] - return y - - -class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super(MixConv2d, self).__init__() - groups = len(k) - if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels - else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = np.eye(groups + 1, groups, k=-1) - a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 - a[0] = 1 - c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) - - def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) - - -class Ensemble(nn.ModuleList): - # Ensemble of models - def __init__(self): - super(Ensemble, self).__init__() - - def forward(self, x, augment=False): - y = [] - for module in self: - y.append(module(x, augment)[0]) - # y = torch.stack(y).max(0)[0] # max ensemble - # y = torch.stack(y).mean(0) # mean ensemble - y = torch.cat(y, 1) # nms ensemble - return y, None # inference, train output - - - - - -class ORT_NMS(torch.autograd.Function): - '''ONNX-Runtime NMS operation''' - @staticmethod - def forward(ctx, - boxes, - scores, - max_output_boxes_per_class=torch.tensor([100]), - iou_threshold=torch.tensor([0.45]), - score_threshold=torch.tensor([0.25])): - device = boxes.device - batch = scores.shape[0] - num_det = random.randint(0, 100) - batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device) - idxs = torch.arange(100, 100 + num_det).to(device) - zeros = torch.zeros((num_det,), dtype=torch.int64).to(device) - selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous() - selected_indices = selected_indices.to(torch.int64) - return selected_indices - - @staticmethod - def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): - return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - - -class TRT_NMS(torch.autograd.Function): - '''TensorRT NMS operation''' - @staticmethod - def forward( - ctx, - boxes, - scores, - background_class=-1, - box_coding=1, - iou_threshold=0.45, - max_output_boxes=100, - plugin_version="1", - score_activation=0, - score_threshold=0.25, - ): - batch_size, num_boxes, num_classes = scores.shape - num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32) - det_boxes = torch.randn(batch_size, max_output_boxes, 4) - det_scores = torch.randn(batch_size, max_output_boxes) - det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) - return num_det, det_boxes, det_scores, det_classes - - @staticmethod - def symbolic(g, - boxes, - scores, - background_class=-1, - box_coding=1, - iou_threshold=0.45, - max_output_boxes=100, - plugin_version="1", - score_activation=0, - score_threshold=0.25): - out = g.op("TRT::EfficientNMS_TRT", - boxes, - scores, - background_class_i=background_class, - box_coding_i=box_coding, - iou_threshold_f=iou_threshold, - max_output_boxes_i=max_output_boxes, - plugin_version_s=plugin_version, - score_activation_i=score_activation, 
- score_threshold_f=score_threshold, - outputs=4) - nums, boxes, scores, classes = out - return nums, boxes, scores, classes - - -class ONNX_ORT(nn.Module): - '''onnx module with ONNX-Runtime NMS operation.''' - def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None, n_classes=80): - super().__init__() - self.device = device if device else torch.device("cpu") - self.max_obj = torch.tensor([max_obj]).to(device) - self.iou_threshold = torch.tensor([iou_thres]).to(device) - self.score_threshold = torch.tensor([score_thres]).to(device) - self.max_wh = max_wh # if max_wh != 0 : non-agnostic else : agnostic - self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=self.device) - self.n_classes=n_classes - - def forward(self, x): - boxes = x[:, :, :4] - conf = x[:, :, 4:5] - scores = x[:, :, 5:] - if self.n_classes == 1: - scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, - # so there is no need to multiplicate. - else: - scores *= conf # conf = obj_conf * cls_conf - boxes @= self.convert_matrix - max_score, category_id = scores.max(2, keepdim=True) - dis = category_id.float() * self.max_wh - nmsbox = boxes + dis - max_score_tp = max_score.transpose(1, 2).contiguous() - selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold) - X, Y = selected_indices[:, 0], selected_indices[:, 2] - selected_boxes = boxes[X, Y, :] - selected_categories = category_id[X, Y, :].float() - selected_scores = max_score[X, Y, :] - X = X.unsqueeze(1).float() - return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1) - -class ONNX_TRT(nn.Module): - '''onnx module with TensorRT NMS operation.''' - def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None ,device=None, n_classes=80): - super().__init__() - assert max_wh is None - self.device = device if device else torch.device('cpu') - self.background_class = -1, - self.box_coding = 1, - self.iou_threshold = iou_thres - self.max_obj = max_obj - self.plugin_version = '1' - self.score_activation = 0 - self.score_threshold = score_thres - self.n_classes=n_classes - - def forward(self, x): - boxes = x[:, :, :4] - conf = x[:, :, 4:5] - scores = x[:, :, 5:] - if self.n_classes == 1: - scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, - # so there is no need to multiplicate. 
- else: - scores *= conf # conf = obj_conf * cls_conf - num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding, - self.iou_threshold, self.max_obj, - self.plugin_version, self.score_activation, - self.score_threshold) - return num_det, det_boxes, det_scores, det_classes - - -class End2End(nn.Module): - '''export onnx or tensorrt model with NMS operation.''' - def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80): - super().__init__() - device = device if device else torch.device('cpu') - assert isinstance(max_wh,(int)) or max_wh is None - self.model = model.to(device) - self.model.model[-1].end2end = True - self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT - self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device, n_classes) - self.end2end.eval() - - def forward(self, x): - x = self.model(x) - x = self.end2end(x) - return x - - - - - -def attempt_load(weights, map_location=None): - # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a - model = Ensemble() - for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - ckpt = torch.load(w, map_location=map_location) # load - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model - - # Compatibility updates - for m in model.modules(): - if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: - m.inplace = True # pytorch 1.7.0 compatibility - elif type(m) is nn.Upsample: - m.recompute_scale_factor = None # torch 1.11.0 compatibility - elif type(m) is Conv: - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility - - if len(model) == 1: - return model[-1] # return model - else: - print('Ensemble created with %s\n' % weights) - for k in ['names', 'stride']: - setattr(model, k, getattr(model[-1], k)) - return model # return ensemble - - diff --git a/spaces/sidharthism/fashion-eye/models/stylegan2/stylegan2-pytorch/op/fused_act.py b/spaces/sidharthism/fashion-eye/models/stylegan2/stylegan2-pytorch/op/fused_act.py deleted file mode 100644 index 7e3d464ae656920c6875bc877281cadb2eaa4105..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/models/stylegan2/stylegan2-pytorch/op/fused_act.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import platform - -import torch -from torch import nn -from torch.autograd import Function -import torch.nn.functional as F -from torch.utils.cpp_extension import load - -use_fallback = False - -# Try loading precompiled, otherwise use native fallback -try: - import fused -except ModuleNotFoundError as e: - print('StyleGAN2: Optimized CUDA op FusedLeakyReLU not available, using native PyTorch fallback.') - use_fallback = True - - -class FusedLeakyReLUFunctionBackward(Function): - @staticmethod - def forward(ctx, grad_output, out, negative_slope, scale): - ctx.save_for_backward(out) - ctx.negative_slope = negative_slope - ctx.scale = scale - - empty = grad_output.new_empty(0) - - grad_input = fused.fused_bias_act( - grad_output, empty, out, 3, 1, negative_slope, scale - ) - - dim = [0] - - if grad_input.ndim > 2: - dim += list(range(2, grad_input.ndim)) - - grad_bias = grad_input.sum(dim).detach() - - return grad_input, grad_bias - - @staticmethod - def backward(ctx, gradgrad_input, gradgrad_bias): - out, = ctx.saved_tensors - gradgrad_out = fused.fused_bias_act( - gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, 
ctx.scale - ) - - return gradgrad_out, None, None, None - - -class FusedLeakyReLUFunction(Function): - @staticmethod - def forward(ctx, input, bias, negative_slope, scale): - empty = input.new_empty(0) - out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) - ctx.save_for_backward(out) - ctx.negative_slope = negative_slope - ctx.scale = scale - - return out - - @staticmethod - def backward(ctx, grad_output): - out, = ctx.saved_tensors - - grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( - grad_output, out, ctx.negative_slope, ctx.scale - ) - - return grad_input, grad_bias, None, None - - -class FusedLeakyReLU(nn.Module): - def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): - super().__init__() - - self.bias = nn.Parameter(torch.zeros(channel)) - self.negative_slope = negative_slope - self.scale = scale - - def forward(self, input): - return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale) - - -def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): - if use_fallback or input.device.type == 'cpu': - return scale * F.leaky_relu( - input + bias.view((1, -1)+(1,)*(input.ndim-2)), negative_slope=negative_slope - ) - else: - return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) diff --git a/spaces/sidphbot/Researcher/arxiv_public_data/authors.py b/spaces/sidphbot/Researcher/arxiv_public_data/authors.py deleted file mode 100644 index 955f044f0ff9f6aee78f7fc7301897138eec05ab..0000000000000000000000000000000000000000 --- a/spaces/sidphbot/Researcher/arxiv_public_data/authors.py +++ /dev/null @@ -1,469 +0,0 @@ -# https://github.com/arXiv/arxiv-base@32e6ad0 -""" -Copyright 2017 Cornell University - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
-""" - -"""Parse Authors lines to extract author and affiliation data.""" -import re -import os -import gzip -import json -from itertools import dropwhile -from typing import Dict, Iterator, List, Tuple -from multiprocessing import Pool, cpu_count - -from arxiv_public_data.tex2utf import tex2utf -from arxiv_public_data.config import LOGGER, DIR_OUTPUT - -logger = LOGGER.getChild('authorsplit') - -PREFIX_MATCH = 'van|der|de|la|von|del|della|da|mac|ter|dem|di|vaziri' - -""" -Takes data from an Author: line in the current arXiv abstract -file and returns a structured set of data: - - author_list_ptr = [ - [ author1_keyname, author1_firstnames, author1_suffix, affil1, affil2 ] , - [ author2_keyname, author2_firstnames, author1_suffix, affil1 ] , - [ author3_keyname, author3_firstnames, author1_suffix ] - ] - -Abstracted from Dienst software for OAI1 and other uses. This -routine should just go away when a better metadata structure is -adopted that deals with names and affiliations properly. - -Must remember that there is at least one person one the archive -who has only one name, this should clearly be considered the key name. - -Code originally written by Christina Scovel, Simeon Warner Dec99/Jan00 - 2000-10-16 - separated. - 2000-12-07 - added support for suffix - 2003-02-14 - get surname prefixes from arXiv::Filters::Index [Simeon] - 2007-10-01 - created test script, some tidying [Simeon] - 2018-05-25 - Translated from Perl to Python [Brian C.] -""" - - -def parse_author_affil(authors: str) -> List[List[str]]: - """ - Parse author line and returns an list of author and affiliation data. - - The list for each author will have at least three elements for - keyname, firstname(s) and suffix. The keyname will always have content - but the other strings might be empty strings if there is no firstname - or suffix. Any additional elements after the first three are affiliations, - there may be zero or more. - - Handling of prefix "XX collaboration" etc. is duplicated here and in - arXiv::HTML::AuthorLink -- it shouldn't be. Likely should just be here. - - This routine is just a wrapper around the two parts that first split - the authors line into parts, and then back propagate the affiliations. - The first part is to be used along for display where we do not want - to back propagate affiliation information. - - :param authors: string of authors from abs file or similar - :return: - Returns a structured set of data: - author_list_ptr = [ - [ author1_keyname, author1_firstnames, author1_suffix, affil1, affil2 ], - [ author2_keyname, author2_firstnames, author1_suffix, affil1 ] , - [ author3_keyname, author3_firstnames, author1_suffix ] - ] - """ - return _parse_author_affil_back_propagate( - **_parse_author_affil_split(authors)) - - -def _parse_author_affil_split(author_line: str) -> Dict: - """ - Split author line into author and affiliation data. - - Take author line, tidy spacing and punctuation, and then split up into - individual author an affiliation data. Has special cases to avoid splitting - an initial collaboration name and records in $back_propagate_affiliation_to - the fact that affiliations should not be back propagated to collaboration - names. - - Does not handle multiple collaboration names. 
- """ - if not author_line: - return {'author_list': [], 'back_prop': 0} - - names: List[str] = split_authors(author_line) - if not names: - return {'author_list': [], 'back_prop': 0} - - names = _remove_double_commas(names) - # get rid of commas at back - namesIter: Iterator[str] = reversed( - list(dropwhile(lambda x: x == ',', reversed(names)))) - # get rid of commas at front - names = list(dropwhile(lambda x: x == ',', namesIter)) - - # Extract all names (all parts not starting with comma or paren) - names = list(map(_tidy_name, filter( - lambda x: re.match('^[^](,]', x), names))) - names = list(filter(lambda n: not re.match( - r'^\s*et\.?\s+al\.?\s*', n, flags=re.IGNORECASE), names)) - - (names, author_list, - back_propagate_affiliations_to) = _collaboration_at_start(names) - - (enumaffils) = _enum_collaboration_at_end(author_line) - - # Split name into keyname and firstnames/initials. - # Deal with different patterns in turn: prefixes, suffixes, plain - # and single name. - patterns = [('double-prefix', - r'^(.*)\s+(' + PREFIX_MATCH + r')\s(' + - PREFIX_MATCH + r')\s(\S+)$'), - ('name-prefix-name', - r'^(.*)\s+(' + PREFIX_MATCH + r')\s(\S+)$'), - ('name-name-prefix', - r'^(.*)\s+(\S+)\s(I|II|III|IV|V|Sr|Jr|Sr\.|Jr\.)$'), - ('name-name', - r'^(.*)\s+(\S+)$'), ] - - # Now go through names in turn and try to get affiliations - # to go with them - for name in names: - pattern_matches = ((mtype, re.match(m, name, flags=re.IGNORECASE)) - for (mtype, m) in patterns) - - (mtype, match) = next(((mtype, m) - for (mtype, m) in pattern_matches - if m is not None), ('default', None)) - if match is None: - author_entry = [name, '', ''] - elif mtype == 'double-prefix': - s = '{} {} {}'.format(match.group( - 2), match.group(3), match.group(4)) - author_entry = [s, match.group(1), ''] - elif mtype == 'name-prefix-name': - s = '{} {}'.format(match.group(2), match.group(3)) - author_entry = [s, match.group(1), ''] - elif mtype == 'name-name-prefix': - author_entry = [match.group(2), match.group(1), match.group(3)] - elif mtype == 'name-name': - author_entry = [match.group(2), match.group(1), ''] - else: - author_entry = [name, '', ''] - - # search back in author_line for affiliation - author_entry = _add_affiliation( - author_line, enumaffils, author_entry, name) - author_list.append(author_entry) - - return {'author_list': author_list, - 'back_prop': back_propagate_affiliations_to} - - -def parse_author_affil_utf(authors: str) -> List: - """ - Call parse_author_affil() and do TeX to UTF conversion. - - Output structure is the same but should be in UTF and not TeX. 
- """ - if not authors: - return [] - return list(map(lambda author: list(map(tex2utf, author)), - parse_author_affil(authors))) - - -def _remove_double_commas(items: List[str]) -> List[str]: - - parts: List[str] = [] - last = '' - for pt in items: - if pt == ',' and last == ',': - continue - else: - parts.append(pt) - last = pt - return parts - - -def _tidy_name(name: str) -> str: - name = re.sub(r'\s\s+', ' ', name) # also gets rid of CR - # add space after dot (except in TeX) - name = re.sub(r'(?', name) - return name - - -def _collaboration_at_start(names: List[str]) \ - -> Tuple[List[str], List[List[str]], int]: - """Perform special handling of collaboration at start.""" - author_list = [] - - back_propagate_affiliations_to = 0 - while len(names) > 0: - m = re.search(r'([a-z0-9\s]+\s+(collaboration|group|team))', - names[0], flags=re.IGNORECASE) - if not m: - break - - # Add to author list - author_list.append([m.group(1), '', '']) - back_propagate_affiliations_to += 1 - # Remove from names - names.pop(0) - # Also swallow and following comma or colon - if names and (names[0] == ',' or names[0] == ':'): - names.pop(0) - - return names, author_list, back_propagate_affiliations_to - - -def _enum_collaboration_at_end(author_line: str)->Dict: - """Get separate set of enumerated affiliations from end of author_line.""" - # Now see if we have a separate set of enumerated affiliations - # This is indicated by finding '(\s*(' - line_m = re.search(r'\(\s*\((.*)$', author_line) - if not line_m: - return {} - - enumaffils = {} - affils = re.sub(r'\s*\)\s*$', '', line_m.group(1)) - - # Now expect to have '1) affil1 (2) affil2 (3) affil3' - for affil in affils.split('('): - # Now expect `1) affil1 ', discard if no match - m = re.match(r'^(\d+)\)\s*(\S.*\S)\s*$', affil) - if m: - enumaffils[m.group(1)] = re.sub(r'[\.,\s]*$', '', m.group(2)) - - return enumaffils - - -def _add_affiliation(author_line: str, - enumaffils: Dict, - author_entry: List[str], - name: str) -> List: - """ - Add author affiliation to author_entry if one is found in author_line. - - This should deal with these cases - Smith B(labX) Smith B(1) Smith B(1, 2) Smith B(1 & 2) Smith B(1 and 2) - """ - en = re.escape(name) - namerex = r'{}\s*\(([^\(\)]+)'.format(en.replace(' ', 's*')) - m = re.search(namerex, author_line, flags=re.IGNORECASE) - if not m: - return author_entry - - # Now see if we have enumerated references (just commas, digits, &, and) - affils = m.group(1).rstrip().lstrip() - affils = re.sub(r'(&|and)/,', ',', affils, flags=re.IGNORECASE) - - if re.match(r'^[\d,\s]+$', affils): - for affil in affils.split(','): - if affil in enumaffils: - author_entry.append(enumaffils[affil]) - else: - author_entry.append(affils) - - return author_entry - - -def _parse_author_affil_back_propagate(author_list: List[List[str]], - back_prop: int) -> List[List[str]]: - """Back propagate author affiliation. - - Take the author list structure generated by parse_author_affil_split(..) - and propagate affiliation information backwards to preceeding author - entries where none was give. Stop before entry $back_prop to avoid - adding affiliation information to collaboration names. 
- - given, eg: - a.b.first, c.d.second (affil) - implies - a.b.first (affil), c.d.second (affil) - and in more complex cases: - a.b.first, c.d.second (1), e.f.third, g.h.forth (2,3) - implies - a.b.first (1), c.d.second (1), e.f.third (2,3), g.h.forth (2,3) - """ - last_affil: List[str] = [] - for x in range(len(author_list) - 1, max(back_prop - 1, -1), -1): - author_entry = author_list[x] - if len(author_entry) > 3: # author has affiliation,store - last_affil = author_entry - elif last_affil: - # author doesn't have affil but later one did => copy - author_entry.extend(last_affil[3:]) - - return author_list - - -def split_authors(authors: str) -> List: - """ - Split author string into authors entity lists. - - Take an author line as a string and return a reference to a list of the - different name and affiliation blocks. While this does normalize spacing - and 'and', it is a key feature that the set of strings returned can be - concatenated to reproduce the original authors line. This code thus - provides a very graceful degredation for badly formatted authors lines, as - the text at least shows up. - """ - # split authors field into blocks with boundaries of ( and ) - if not authors: - return [] - aus = re.split(r'(\(|\))', authors) - aus = list(filter(lambda x: x != '', aus)) - - blocks = [] - if len(aus) == 1: - blocks.append(authors) - else: - c = '' - depth = 0 - for bit in aus: - if bit == '': - continue - if bit == '(': # track open parentheses - depth += 1 - if depth == 1: - blocks.append(c) - c = '(' - else: - c = c + bit - elif bit == ')': # track close parentheses - depth -= 1 - c = c + bit - if depth == 0: - blocks.append(c) - c = '' - else: # haven't closed, so keep accumulating - continue - else: - c = c + bit - if c: - blocks.append(c) - - listx = [] - - for block in blocks: - block = re.sub(r'\s+', ' ', block) - if re.match(r'^\(', block): # it is a comment - listx.append(block) - else: # it is a name - block = re.sub(r',?\s+(and|\&)\s', ',', block) - names = re.split(r'(,|:)\s*', block) - for name in names: - if not name: - continue - name = name.rstrip().lstrip() - if name: - listx.append(name) - - # Recombine suffixes that were separated with a comma - parts: List[str] = [] - for p in listx: - if re.match(r'^(Jr\.?|Sr\.?\[IV]{2,})$', p) \ - and len(parts) >= 2 \ - and parts[-1] == ',' \ - and not re.match(r'\)$', parts[-2]): - separator = parts.pop() - last = parts.pop() - recomb = "{}{} {}".format(last, separator, p) - parts.append(recomb) - else: - parts.append(p) - - return parts - -def parse_authorline(authors: str) -> str: - """ - The external facing function from this module. Converts a complex authorline - into a simple one with only UTF-8. - - Parameters - ---------- - authors : string - The raw author line from the metadata - - Returns - ------- - clean_authors : string - String represeting cleaned author line - - Examples - -------- - >>> parse_authorline('A. Losev, S. Shadrin, I. Shneiberg') - 'Losev, A.; Shadrin, S.; Shneiberg, I.' - - >>> parse_authorline("C. Bal\\'azs, E. L. Berger, P. M. Nadolsky, C.-P. Yuan") - 'Balázs, C.; Berger, E. L.; Nadolsky, P. M.; Yuan, C. -P.' - - >>> parse_authorline('Stephen C. Power (Lancaster University), Baruch Solel (Technion)') - 'Power, Stephen C.; Solel, Baruch' - - >>> parse_authorline("L. Scheck (1), H.-Th. Janka (1), T. Foglizzo (2), and K. Kifonidis (1)\n ((1) MPI for Astrophysics, Garching; (2) Service d'Astrophysique, CEA-Saclay)") - 'Scheck, L.; Janka, H. -Th.; Foglizzo, T.; Kifonidis, K.' 
- """ - names = parse_author_affil_utf(authors) - return '; '.join([', '.join([q for q in n[:2] if q]) for n in names]) - -def _parse_article_authors(article_author): - try: - return [article_author[0], parse_author_affil_utf(article_author[1])] - except Exception as e: - msg = "Author split failed for article {}".format(article_author[0]) - logger.error(msg) - logger.exception(e) - return [article_author[0], ''] - -def parse_authorline_parallel(article_authors, n_processes=None): - """ - Parallelize `parse_authorline` - Parameters - ---------- - article_authors : list - list of tuples (arXiv id, author strings from metadata) - (optional) - n_processes : int - number of processes - Returns - ------- - authorsplit : list - list of author strings in standardized format - [ - [ author1_keyname, author1_firstnames, author1_suffix, affil1, - affil2 ] , - [ author2_keyname, author2_firstnames, author1_suffix, affil1 ] , - [ author3_keyname, author3_firstnames, author1_suffix ] - ] - """ - logger.info( - 'Parsing author lines for {} articles...'.format(len(article_authors)) - ) - - pool = Pool(n_processes) - parsed = pool.map(_parse_article_authors, article_authors) - outdict = {aid: auth for aid, auth in parsed} - - filename = os.path.join(DIR_OUTPUT, 'authors-parsed.json.gz') - logger.info('Saving to {}'.format(filename)) - with gzip.open(filename, 'wb') as fout: - fout.write(json.dumps(outdict).encode('utf-8')) diff --git a/spaces/sieferan2023/Music_Recommendation/app.py b/spaces/sieferan2023/Music_Recommendation/app.py deleted file mode 100644 index b8e324b9c29780cc194b84219d4782bd519931d7..0000000000000000000000000000000000000000 --- a/spaces/sieferan2023/Music_Recommendation/app.py +++ /dev/null @@ -1,172 +0,0 @@ -### ----------------------------- ### -### libraries ### -### ----------------------------- ### - -import gradio as gr -import pandas as pd -import numpy as np -from sklearn.model_selection import train_test_split -from sklearn.linear_model import LogisticRegression -from sklearn import metrics - - -### ------------------------------ ### -### data transformation ### -### ------------------------------ ### - -# load dataset -uncleaned_data = pd.read_csv('data.csv') - -# remove timestamp from dataset (always first column) -uncleaned_data = uncleaned_data.iloc[: , 1:] -data = pd.DataFrame() - -# keep track of which columns are categorical and what -# those columns' value mappings are -# structure: {colname1: {...}, colname2: {...} } -cat_value_dicts = {} -final_colname = uncleaned_data.columns[len(uncleaned_data.columns) - 1] - -# for each column... -for (colname, colval) in uncleaned_data.iteritems(): - - # check if col is already a number; if so, add col directly - # to new dataframe and skip to next column - if isinstance(colval.values[0], (np.integer, float)): - data[colname] = uncleaned_data[colname].copy() - continue - - # structure: {0: "lilac", 1: "blue", ...} - new_dict = {} - val = 0 # first index per column - transformed_col_vals = [] # new numeric datapoints - - # if not, for each item in that column... - for (row, item) in enumerate(colval.values): - - # if item is not in this col's dict... 
- if item not in new_dict: - new_dict[item] = val - val += 1 - - # then add numerical value to transformed dataframe - transformed_col_vals.append(new_dict[item]) - - # reverse dictionary only for final col (0, 1) => (vals) - if colname == final_colname: - new_dict = {value : key for (key, value) in new_dict.items()} - - cat_value_dicts[colname] = new_dict - data[colname] = transformed_col_vals - - -### -------------------------------- ### -### model training ### -### -------------------------------- ### - -# select features and predicton; automatically selects last column as prediction -cols = len(data.columns) -num_features = cols - 1 -x = data.iloc[: , :num_features] -y = data.iloc[: , num_features:] - -# split data into training and testing sets -x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25) - -# instantiate the model (using default parameters) -model = LogisticRegression() -model.fit(x_train, y_train.values.ravel()) -y_pred = model.predict(x_test) - - -### -------------------------------- ### -### article generation ### -### -------------------------------- ### -# borrow file reading function from reader.py - -def get_feat(): - feats = [abs(x) for x in model.coef_[0]] - max_val = max(feats) - idx = feats.index(max_val) - return data.columns[idx] - -acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%" -most_imp_feat = get_feat() -# info = get_article(acc, most_imp_feat) - - - -### ------------------------------- ### -### interface creation ### -### ------------------------------- ### - - -# predictor for generic number of features -def general_predictor(*args): - features = [] - - # transform categorical input - for colname, arg in zip(data.columns, args): - if (colname in cat_value_dicts): - features.append(cat_value_dicts[colname][arg]) - else: - features.append(arg) - - # predict single datapoint - new_input = [features] - result = model.predict(new_input) - return cat_value_dicts[final_colname][result[0]] - -# add data labels to replace those lost via star-args - - -block = gr.Blocks() - -with open('info.md') as f: - with block: - gr.Markdown(f.readline()) - gr.Markdown('Take the quiz to get a personalized recommendation using AI.') - - with gr.Row(): - with gr.Box(): - inputls = [] - for colname in data.columns: - # skip last column - if colname == final_colname: - continue - - # access categories dict if data is categorical - # otherwise, just use a number input - if colname in cat_value_dicts: - radio_options = list(cat_value_dicts[colname].keys()) - inputls.append(gr.inputs.Dropdown(choices=radio_options, type="value", label=colname)) - else: - # add numerical input - inputls.append(gr.inputs.Number(label=colname)) - gr.Markdown("
        ") - - submit = gr.Button("Click to see your personalized result!", variant="primary") - gr.Markdown("
        ") - output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here") - - submit.click(fn=general_predictor, inputs=inputls, outputs=output) - gr.Markdown("
        ") - - with gr.Row(): - with gr.Box(): - gr.Markdown(f"

        Accuracy:

        {acc}") - with gr.Box(): - gr.Markdown(f"

        Most important feature:

        {most_imp_feat}") - - gr.Markdown("
        ") - - with gr.Box(): - gr.Markdown('''⭐ Note that model accuracy is based on the uploaded data.csv and reflects how well the AI model can give correct recommendations for that dataset. Model accuracy and most important feature can be helpful for understanding how the model works, but should not be considered absolute facts about the real world.''') - - with gr.Box(): - with open('info.md') as f: - f.readline() - gr.Markdown(f.read()) - -# show the interface -block.launch() \ No newline at end of file diff --git a/spaces/simonduerr/ProteinMPNN/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_4.sh b/spaces/simonduerr/ProteinMPNN/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_4.sh deleted file mode 100644 index 1d167d3a769df129068698db31792a1b63e97e4d..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/ProteinMPNN/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_4.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --mem=32g -#SBATCH --gres=gpu:rtx2080:1 -#SBATCH -c 3 -#SBATCH --output=example_4.out - -source activate mlfold - -folder_with_pdbs="../PDB_complexes/pdbs/" - -output_dir="../PDB_complexes/example_4_outputs" -if [ ! -d $output_dir ] -then - mkdir -p $output_dir -fi - - -path_for_parsed_chains=$output_dir"/parsed_pdbs.jsonl" -path_for_assigned_chains=$output_dir"/assigned_pdbs.jsonl" -path_for_fixed_positions=$output_dir"/fixed_pdbs.jsonl" -chains_to_design="A C" -#The first amino acid in the chain corresponds to 1 and not PDB residues index for now. -fixed_positions="1 2 3 4 5 6 7 8 23 25, 10 11 12 13 14 15 16 17 18 19 20 40" #fixing/not designing residues 1 2 3...25 in chain A and residues 10 11 12...40 in chain C - -python ../helper_scripts/parse_multiple_chains.py --input_path=$folder_with_pdbs --output_path=$path_for_parsed_chains - -python ../helper_scripts/assign_fixed_chains.py --input_path=$path_for_parsed_chains --output_path=$path_for_assigned_chains --chain_list "$chains_to_design" - -python ../helper_scripts/make_fixed_positions_dict.py --input_path=$path_for_parsed_chains --output_path=$path_for_fixed_positions --chain_list "$chains_to_design" --position_list "$fixed_positions" - -python ../protein_mpnn_run.py \ - --jsonl_path $path_for_parsed_chains \ - --chain_id_jsonl $path_for_assigned_chains \ - --fixed_positions_jsonl $path_for_fixed_positions \ - --out_folder $output_dir \ - --num_seq_per_target 2 \ - --sampling_temp "0.1" \ - --batch_size 1 diff --git a/spaces/simonraj/ELOralCoachv2/custom.css b/spaces/simonraj/ELOralCoachv2/custom.css deleted file mode 100644 index 04cfcae16e9a80be36e60dae1af0fea46501559c..0000000000000000000000000000000000000000 --- a/spaces/simonraj/ELOralCoachv2/custom.css +++ /dev/null @@ -1,5 +0,0 @@ -/* custom.css */ -.gradio_output_text { - height: 1000px; - overflow-y: auto; -} diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Clash of Clans Serveur Private APK How to Download and Play the Ultimate Strategy Game.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Clash of Clans Serveur Private APK How to Download and Play the Ultimate Strategy Game.md deleted file mode 100644 index b6123e76afe002a61e537c65730b646a0fb6a418..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Clash of Clans Serveur Private APK How to Download and Play the Ultimate Strategy Game.md +++ /dev/null @@ -1,93 +0,0 @@ -
        -

        Clash of Clans Serveur Private APK: How to Play CoC on a Custom Server

        -

        Clash of Clans is one of the most popular and addictive mobile games in the world, with millions of players competing and collaborating in clans, building bases, raiding enemies, and collecting resources. But what if you want to play CoC on your own terms, without the restrictions and limitations imposed by the official game? That's where a Clash of Clans serveur private APK comes in handy.

        -

        clash of clans serveur private apk


        Download Ziphttps://ssurll.com/2uNWwu



        -

        A Clash of Clans serveur private APK is a modified version of the game that runs on a custom server, not on the Supercell server. This means that you can enjoy the game with unlimited resources, custom buildings, heroes, troops, and more. You can also play with other players who are using the same server, or create your own clan and invite your friends.

        -

        However, playing on a private server also has some drawbacks and risks, such as compatibility issues, security threats, legal troubles, and ethical dilemmas. That's why you need to be careful and informed before you download and install a Clash of Clans serveur private APK on your device.

        -

        In this article, we will explain what Clash of Clans is and why it is so popular, what a private server is and how it works, how to download and install a Clash of Clans serveur private APK, and which Clash of Clans serveur private APKs are worth trying out. Let's get started!

        -

        What is Clash of Clans and why is it so popular?

        -

        Clash of Clans is a freemium strategy game developed by Supercell, a Finnish company that also created other hit games like Hay Day, Boom Beach, Clash Royale, and Brawl Stars. The game was released in 2012 for iOS devices and in 2013 for Android devices. Since then, it has become one of the most downloaded and highest-grossing apps in the world, with over 500 million downloads and billions of dollars in revenue.

        -

        The gameplay and features of Clash of Clans

        -

        The gameplay of Clash of Clans is simple yet engaging. You start with a small village that you have to build up by collecting resources like gold, elixir, dark elixir, gems, and builder base gold. You can use these resources to upgrade your buildings, defenses, walls, traps, heroes, troops, spells, siege machines, pets, etc.

        -

        clash of clans private server download apk
        -clash of clans mod apk private server
        -clash of clans hack apk private server
        -clash of clans atrasis private server apk
        -clash of clans plenixclash private server apk
        -clash of clans nulls private server apk
        -clash of clans fhx private server apk
        -clash of clans magic private server apk
        -clash of clans lights private server apk
        -clash of clans unlimited gems private server apk
        -clash of clans town hall 15 private server apk
        -clash of clans latest version private server apk
        -clash of clans ios private server apk
        -clash of clans pc private server apk
        -clash of clans online private server apk
        -clash of clans free download private server apk
        -clash of clans best private server apk
        -clash of clans supercell private server apk
        -clash of clans custom heroes private server apk
        -clash of clans new update private server apk
        -clash of clans builder base private server apk
        -clash of clans unlimited troops private server apk
        -clash of clans offline private server apk
        -clash of clans no root private server apk
        -clash of clans working private server apk
        -clash of clans original private server apk
        -clash of clans easy install private server apk
        -clash of clans stable private server apk
        -clash of clans fast private server apk
        -clash of clans fun private server apk
        -clash of clans cheats private server apk
        -clash of clans mega mod private server apk
        -clash of clans unlimited everything private server apk
        -clash of clans hacked version private server apk
        -clash of clans 2023 version private server apk
        -clash of clans android 11 private server apk
        -clash of clans royal war private server apk
        -clash of clans th15 modded troops and buildings private server apk
        -clash of clans unlimited resources and gems gold elixir dark elixir builder gold builder elixir and clan games rewards unlocked in the shop with no cost and no cooldown time for anything in the game including training troops spells siege machines heroes and buildings upgrade time and cost reduced to 0 seconds and 0 resources in the game with no limit on how many times you can do it in a day on the latest version 15.83.19 updated on december 21st 2022 with the new town hall 15 update with new hero pets and new defense levels and new troops levels and new spells levels and new siege machines levels and new super troops levels and new clan castle levels and new walls levels and new decorations and obstacles and events and challenges and achievements and clan perks and clan games and clan war leagues and friendly wars and friendly challenges and versus battles and builder base and otto hut and gear up and practice mode and war base layout editor and home village layout editor and builder base layout editor and profile layout editor and news tab layout editor and settings layout editor and more features added in the game by the developers supercell on their official servers which are compatible with ios android and pc so everyone can enjoy playing their game without any circumstances on their devices with their friends online or offline anytime they want to play their game without any connection errors or any issues or any bugs or any glitches or any crashes or any lags or any delays or any interruptions or any problems or any troubles or any difficulties or any complications or any inconveniences or any annoyances or any nuisances or any hassles or any worries or any concerns or any doubts or any fears or any risks or any dangers or any threats or any harms or any damages or any losses or any injuries or any pains or any sufferings or any troubles or any distresses or any sorrows or any sadnesses or any miseries or any agonies or any anguishes or any torments or any afflictions or any adversities or any hardships or any difficulties or anything else that can ruin their gaming experience on their devices with their friends online or offline anytime they want to play their game without having to spend a single penny on their game because everything is free in their game on their devices with their friends online or offline anytime they want to play their game on the best most amazing most awesome most incredible most fantastic most fabulous most wonderful most marvelous most astonishing most astounding most stunning most spectacular most breathtaking most mind-blowing most jaw-dropping most eye-popping most awe-inspiring most impressive most extraordinary most outstanding most excellent most superb most magnificent most glorious most splendid most beautiful most gorgeous most lovely most charming most delightful most exquisite most elegant most graceful most stylish most fashionable most chic most trendy most cool most awesome most amazing most incredible most fantastic most fabulous most wonderful most marvelous most astonishing most astounding most stunning most spectacular most breathtaking most mind-blowing most jaw-dropping most eye-popping most awe-inspiring most impressive most extraordinary most outstanding most excellent most superb most magnificent

        -

        You can also join or create a clan with other players from around the world. You can chat with your clanmates, donate and request troops, participate in clan wars, clan games, clan war leagues, etc. You can also attack other players' bases to loot their resources or trophies. You can also defend your base from enemy attacks by setting up your defenses strategically.

        -

        The game offers various modes and challenges for different levels of players. You can play in the home village or the builder base mode. You can also play in single-player or multiplayer mode. You can also take part in special events like seasonal challenges, clan war leagues, clan games, etc.

        -

        The challenges and limitations of Clash of Clans

        -

        While Clash of Clans is undoubtedly fun and addictive, it also has some challenges and limitations that can frustrate some players. Some of these challenges and limitations are:
        - The game requires a lot of time, patience, and dedication to progress and reach higher levels. You have to wait hours or days for buildings, troops, and heroes to upgrade, and you have to farm resources constantly to afford those upgrades.
        - The game can be very competitive and stressful, especially for players who care about their trophies, rankings, and clan wars. You have to deal with losing attacks, getting raided, dropping trophies, and facing stronger opponents.
        - The game can be very expensive and tempting, especially for players who want to speed up their progress or get an edge over others. Gems, the premium currency, cost real money and can be spent to skip waiting times, buy resources, and boost production.
        - The game can become repetitive and boring, especially for players who have been playing for a long time or have reached the endgame, since the loop of farming resources, attacking bases, and upgrading buildings rarely changes.

        What is a private server and how does it work?

        -

        A private server is a server that is not owned or controlled by Supercell, the developer of Clash of Clans. A private server is usually hosted by a third-party provider or a group of enthusiasts who want to create their own version of the game.

        -

        The benefits and drawbacks of playing on a private server

        -

        Playing on a private server has both benefits and drawbacks for players who want to experience Clash of Clans differently.
        The benefits of playing on a private server are:
        - You can enjoy the game with unlimited resources, such as gold, elixir, dark elixir, and gems, without having to farm resources or spend money on gems.
        - You can enjoy custom features, such as modified buildings, heroes, troops, spells, siege machines, and pets, and you can change game settings like the difficulty level, the attack time limit, and the troop capacity.
        - You can play with other players who are using the same server: chat with them, join or create clans, and take part in clan wars together.
        The drawbacks of playing on a private server are:
        - You can face compatibility issues with your device or game version, and you may need to root or jailbreak your device or download a different APK file to play.
        - You can face security threats from malware or hackers, since downloading a private server APK from an unknown source may expose your device or personal information to viruses or phishing attacks.
        - You can face legal trouble from Supercell or other authorities, because using a private server can violate Supercell's terms of service and intellectual property rights, and in some countries it is against the law.
        - You can face ethical dilemmas: you may feel that playing with an unfair advantage over players on the official server is wrong, and other players may see using a private server as cheating or hacking and lose respect for you.

        -

        The legal and ethical issues of using a private server

        -

        Using a private server is not officially supported or endorsed by Supercell, the developer of Clash of Clans. In fact, Supercell considers using a private server as a violation of their terms of service and their intellectual property rights.

        -

        According to Supercell's terms of service, you agree not to:
        - Use cheats, exploits, automation software, bots, hacks, mods or any unauthorized third-party software designed to modify or interfere with the Service or any Supercell game experience;
        - Modify or cause to be modified any files that are a part of the Service or any Supercell game without Supercell's express written consent;
        - Disrupt, overburden, or aid or assist in the disruption or overburdening of any computer or server used to offer or support the Service or any Supercell game environment;
        - Institute, assist or become involved in any type of attack, including distribution of viruses, spyware, adware, phishing tools, keyloggers, remote access trojans (RATs), worms, rootkits, bootkits, cryptotrojans, ransomware or other malicious software, or denial-of-service attacks upon the Service;
        - Attempt to gain unauthorized access to the Service;
        - Harvest information about other users without their consent;
        - Use any unauthorized third-party software that intercepts, accesses, reads, manipulates, modifies, adds, deletes, changes, transmits, redirects, distributes, duplicates, interferes with, communicates with or otherwise interacts with the Service or any Supercell game;
        - Reverse engineer, decompile, disassemble, decipher or otherwise attempt to derive the source code for any underlying software or other intellectual property used to provide the Service or any Supercell game, or obtain any information from the Service or any Supercell game using any method not expressly permitted by Supercell;
        - Copy, modify, or distribute rights or content from any Supercell site or game, or Supercell's copyrights or trademarks, or use any method to copy or distribute the content of the Service except as specifically allowed in these Terms of Service;
        - Solicit or attempt to solicit personal information from other users of the Service or any Supercell game;
        - Collect, harvest or post anyone's private information, including personally identifiable information (whether in text, image or video form), identification documents, or financial information through the Service;
        - Use the Service in violation of any applicable law or regulation;
        - Use the Service for fraudulent purposes.

        -

        If you violate these terms of service, Supercell has the right to take action against you, such as:
        - Suspending or terminating your account and access to the Service;
        - Deleting your account and all related information and files;
        - Taking legal action against you for damages caused by your violation;
        - Reporting you to law enforcement authorities.

        -

        Moreover, using a private server may also be illegal in some countries where it is considered as hacking, piracy, or theft of intellectual property. You may face fines, lawsuits, or even criminal charges if you are caught using a private server in these countries.

        -

        Furthermore, using a private server may also be unethical in some cases where it is seen as cheating, unfair, or disrespectful to other players who play on the official server. You may lose the trust, respect, or friendship of other players who consider using a private server as a dishonest or dishonorable act.

        -

        How to download and install a Clash of Clans serveur private APK?

        -

        If you still want to try playing on a private server despite the risks and challenges involved, you need to download and install a Clash of Clans serveur private APK on your device. A Clash of Clans serveur private APK is a file that contains the modified version of the game that can run on a custom server.

        -

        The requirements and precautions for using a private server APK

        -

        Before you download and install a Clash of Clans serveur private APK, make sure that you meet the following requirements and take the following precautions (a quick command-line check for the device-side requirements is sketched after this list):
        - You need an Android device that can run Clash of Clans; the minimum system requirements are Android 4.4 and 1 GB of RAM.
        - You need enough storage space on your device to download and install the APK file. The file size varies by server, but it is usually around 100 MB.
        - You need a stable internet connection to play on a private server; the speed and quality of your connection affect your gameplay experience.
        - Back up your original Clash of Clans data before you install a private server APK, for example by linking your account to Google Play Games or Facebook, so you can restore it if you switch back to the official server.
        - Uninstall your original Clash of Clans app before you install a private server APK; you cannot have both apps on your device at the same time.
        - Enable installation from unknown sources in your device settings. This lets you install apps from outside the Google Play Store, but it also exposes your device to potential malware.
        - Download the private server APK only from a trusted and reliable source. Many websites offer Clash of Clans serveur private APKs, but not all of them are safe or legitimate, so do some research and read reviews before choosing one.
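        The device-side requirements above (Android version, RAM, free storage) can be checked from a computer before anything is copied across. This is only an optional sketch, not part of the original guide: it assumes the standard Android platform-tools (adb) are installed and USB debugging is enabled, and it reads stock Android properties rather than anything specific to Clash of Clans or to any particular private server.

        ```
        # Optional pre-install check over ADB (requires Android platform-tools and USB debugging enabled)
        adb shell getprop ro.build.version.release   # Android version; the game needs 4.4 or newer
        adb shell getprop ro.build.version.sdk       # API level, useful for cross-checking an APK's minimum SDK
        adb shell df -h /data                        # free space on the data partition (the APK is usually ~100 MB)
        adb shell cat /proc/meminfo                  # MemTotal near the top shows whether the device has at least 1 GB of RAM
        ```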

        -

        The steps and tips for downloading and installing a private server APK

        -

        After you have met the requirements and taken the precautions above, follow these steps to download and install a private server APK:
        - Step 1: Go to the website that offers the Clash of Clans serveur private APK you want to use, and make sure it is compatible with your device and game version.
        - Step 2: Click the download button and wait for the file to finish downloading. This may take some time depending on your internet speed and the file size.
        - Step 3: Locate the file in your device storage and tap on it to start the installation. You may see warnings or permission prompts; allow them only if you trust the source.
        - Step 4: Wait for the installation to finish. You may see a notification or message confirming the installation.
        - Step 5: Open the app and play Clash of Clans on the private server. Expect some differences in the interface, settings, and features, and you may need to create a new account or a different username and password.
        A few extra tips:
        - Tip 1: You can install more than one Clash of Clans serveur private APK on your device, as long as they have different package names and signatures (the command-line sketch below shows one way to check both before installing), which lets you try different servers and switch between them easily.
        - Tip 2: You can update your Clash of Clans serveur private APK whenever a new version is available from the source website, but you may need to uninstall and reinstall the app, or clear its data and cache, to avoid errors or glitches.
        - Tip 3: You can delete your Clash of Clans serveur private APK whenever you want to stop using it or switch back to the official server, but you may lose your progress and data on the private server unless you have backed it up somehow.
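        Tip 1 hinges on package names and signatures, and every step above hinges on knowing exactly what file you downloaded. The commands below are one way to inspect an APK from a computer before sideloading it. This is an illustrative sketch, not part of the original guide: it assumes the Android SDK build-tools (aapt, apksigner) and platform-tools (adb) are installed, and modded-coc.apk is just a placeholder file name.

        ```
        # Inspect a downloaded APK before sideloading it (Android SDK build-tools assumed on PATH)
        sha256sum modded-coc.apk                                         # checksum to compare against the source site, if it publishes one
        aapt dump badging modded-coc.apk | grep -E 'package|sdkVersion'  # package name, version and minimum SDK
        apksigner verify --print-certs modded-coc.apk                    # shows who actually signed this build
        adb install modded-coc.apk                                       # only after the checks above, and at your own risk
        ```

        If the package name or signing certificate does not match what the download page claims, treat the file as tampered with and do not install it.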

        What are the best Clash of Clans serveur private APKs to try out?

        -

        There are many Clash of Clans serveur private APKs available online, but not all of them are worth trying out. Some of them may be outdated, unstable, insecure, or boring. To help you choose the best ones, we have compiled a list of some of the most popular and reliable Clash of Clans serveur private APKs that you can download and install on your device.

        -

        Clash of Magic: A feature-rich and stable server with four sub-servers

        -

        Clash of Magic is one of the oldest and most popular Clash of Clans serveur private APKs. It offers four sub-servers with different features and modes. You can choose the one that suits your preferences and play style.

        -

        The four sub-servers are:
        - S1: The Black Magic: unlimited resources plus custom buildings, heroes, troops, and spells, and you can modify the game settings as you wish.
        - S2: The Hall of Magic: unlimited resources and custom content like S1, but with some limits on the number and level of buildings and troops you can use.
        - S3: The Power of Magic: normal resources, buildings, heroes, troops, and spells, with modifications and enhancements that make the game more fun and challenging.
        - S4: The Power of Magic 2: the same idea as S3, with its own set of modifications and enhancements.

        -

        You can download Clash of Magic from its official website: https://clashofmagic.net/

        -

        PlenixClash: A user-friendly and compatible server with custom mods

        -

        PlenixClash is another popular and reliable Clash of Clans serveur private APK. It offers a user-friendly and compatible server with custom mods that enhance the game experience.

        -

        Some of the features and mods that PlenixClash offers are: unlimited resources, custom buildings, custom heroes, custom troops, custom spells, custom siege machines, custom pets, custom commands, custom events, custom challenges, custom skins, and custom maps.

        -

        You can download PlenixClash from its official website: https://plenixclash.com/

        -

        Clash of Dreams: A fast and secure server with unlimited resources

        -

        Clash of Dreams is another popular and reliable Clash of Clans serveur private APK. It offers a fast and secure server with unlimited resources that allow you to play the game without any restrictions or limitations.

        -

        Some of the features that Clash of Dreams offers are: unlimited resources, normal buildings, normal heroes, normal troops, normal spells, normal siege machines, normal pets, fast loading times, a high security level, and no ads.

        -

        You can download Clash of Dreams from its official website: https://clashofdreams.xyz/

        -

        Conclusion

        -

        Clash of Clans is a fun and addictive game that millions of players enjoy worldwide. However, some players may want to play the game on their own terms, without the restrictions and limitations imposed by the official game. That's why they may opt for using a Clash of Clans serveur private APK.

        -

        A Clash of Clans serveur private APK is a modified version of the game that runs on a custom server, not on the Supercell server. This means that you can enjoy the game with unlimited resources, custom features, and other players who are using the same server.

        -

        However, using a private server also has some drawbacks and risks, such as compatibility issues, security threats, legal troubles, and ethical dilemmas. That's why you need to be careful and informed before you download and install a Clash of Clans serveur private APK on your device.

        -

        In this article, we have explained what Clash of Clans is and why it is so popular, what a private server is and how it works, how to download and install a Clash of Clans serveur private APK, and what are the best Clash of Clans serveur private APKs to try out. We hope that this article has been helpful and informative for you.

        -

        If you decide to use a Clash of Clans serveur private APK, we advise you to do so at your own risk and responsibility. We also recommend you to respect the rights and feelings of other players who play on the official server. And most importantly, we encourage you to have fun and enjoy the game!

        -

        FAQs

        -

        Here are some frequently asked questions about Clash of Clans serveur private APKs:

        - Q: Is using a Clash of Clans serveur private APK safe?
        A: Not completely. It may expose your device or personal information to malware or hackers, and you may face legal trouble from Supercell or other authorities if the server violates their terms of service or intellectual property rights. Only use an APK from a trusted, reliable source, and do so at your own risk and responsibility.
        - Q: Is using a Clash of Clans serveur private APK cheating?
        A: Technically no, because it does not affect the gameplay or progress of players on the official server. However, some players consider it unfair or dishonest, since it gives you an advantage in resources and features, so respect other players and avoid using a private server for malicious or abusive purposes.
        - Q: Can I play with my friends who play on the official server if I use a private server APK?
        A: No. You can only play with other players who are using the same server as you. To play with friends on the official server, switch back by uninstalling the private server APK and reinstalling the original Clash of Clans app.
        - Q: Can I switch between different servers if I use a private server APK?
        A: Yes. As long as the APKs have different package names and signatures you can install more than one and switch between them easily, though you may need to uninstall and reinstall an app, or clear its data and cache, to avoid errors or glitches when switching.
        - Q: Can I update my private server APK when a new version is available?
        A: Yes, you can update it whenever the source website releases a new version, but again you may need to uninstall and reinstall the app, or clear its data and cache, to avoid errors or glitches.

        197e85843d
        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Fire MAX Mod Menu APK v2.99.1 Unlimited Diamonds Hack 99999 for 2023.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Fire MAX Mod Menu APK v2.99.1 Unlimited Diamonds Hack 99999 for 2023.md deleted file mode 100644 index 62afb63d9f52c672acc1de06d5a929d2288e62f2..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Fire MAX Mod Menu APK v2.99.1 Unlimited Diamonds Hack 99999 for 2023.md +++ /dev/null @@ -1,150 +0,0 @@ -
        -

        Free Fire MAX Diamond Hack 99999 APK 2023: Is It Possible?

        -

        Free Fire MAX is a popular battle royale game developed by Garena that offers enhanced graphics, animations, and sound effects for a more immersive gaming experience. The game has millions of fans around the world who love to compete and customize their characters, weapons, and vehicles with various skins and items. However, most of these items require diamonds, which are the premium currency of the game. Diamonds can be bought with real money, but many players are looking for ways to get them for free or at a cheaper price. Some of them even resort to using modded APKs or online generators that claim to provide unlimited diamonds for free. But are these methods really safe and effective? Or are they just scams that can harm your account and device? In this article, we will answer these questions and show you how to get Free Fire MAX diamonds legally in 2023.

        -

        free fire max diamond hack 99999 apk 2023


        Download Ziphttps://ssurll.com/2uO04Q



        -

        How to get Free Fire MAX diamonds legally in 2023

        -

        There are several ways to get Free Fire MAX diamonds without breaking the rules or risking your security. Here are some of them:

        -

        Redeem codes: How to use them and where to find them

        -

        Redeem codes are special codes that Garena releases on certain occasions, such as festivals, events, or milestones. They can be used on the Rewards Redemption Site to get various rewards, including free diamonds, items, weapons, and currencies. To use a redeem code, you need to follow these steps:

        -
          -
        1. Visit the Rewards Redemption Site from Garena's official website.
        2. Login with your Free Fire MAX account.
        3. Enter the redeem code in the box and click confirm.
        4. Check your in-game mail for the reward.
        -

        To find active redeem codes, you can follow Garena's social media accounts, such as Facebook, Twitter, Instagram, or YouTube. You can also check out some reliable websites or blogs that post the latest redeem codes regularly.

        -

        Events: How to participate and what rewards to expect

        -

        Garena is known for updating Free Fire MAX with various in-game events that offer exciting challenges and rewards for players. These events can be a great opportunity to earn some free diamonds and other items by completing certain tasks or missions. To participate in an event, you need to follow these steps:

        -
          -
        1. Open the game and tap on the calendar icon in the top right corner.
        2. Select the event that you want to join and tap on it.
        3. Read the event details and rules carefully.
        4. Complete the event objectives and claim your rewards.
        -

        The rewards for each event may vary depending on the difficulty and duration of the event. Some of the common rewards include free diamonds, gold coins, vouchers, skins, crates, and bundles. You can also get some exclusive items that are only available during the event period.

        -

        Advance Server: How to register and what benefits to enjoy

        -

        Advance Server is a special server that Garena uses to test new features and updates before they are released to the public. Players who join the Advance Server can try out the latest changes and give feedback to the developers. They can also get some free diamonds and other rewards for reporting bugs and glitches. To join the Advance Server, you need to follow these steps:

        -
          -
        1. Visit the Advance Server website from Garena's official website.
        2. Login with your Free Fire MAX account.
        3. Fill out the registration form and submit it.
        4. Wait for the confirmation email and download the Advance Server APK file.
        5. Install the APK file on your device and open it.
        6. Login with your Free Fire MAX account and enjoy the new features.
        -

        The benefits of joining the Advance Server include getting free diamonds, gold coins, and other rewards for reporting bugs and glitches. You can also get some exclusive items that are only available on the Advance Server. However, you should note that the Advance Server has limited slots and is only open for a certain period of time. You should also backup your data before joining the Advance Server, as it may not be compatible with your regular server.

        -

        How to avoid Free Fire MAX diamond hack scams in 2023

        -

        While there are some legitimate ways to get Free Fire MAX diamonds for free or at a cheaper price, there are also many scams that try to lure players with false promises and offers. These scams can be very dangerous and can result in losing your account, money, or personal information. Here are some tips on how to avoid Free Fire MAX diamond hack scams in 2023:

        -

        Why you should not trust modded APKs or online generators

        -

        Modded APKs are modified versions of the original game files that claim to provide unlimited diamonds, items, or features for free. Online generators are websites or apps that claim to generate free diamonds or codes for you. However, these methods are not only illegal but also risky. Here are some reasons why you should not trust modded APKs or online generators:

        -


        -
          -
• They can contain malware or viruses that can harm your device or steal your data.
• They can get your account banned or suspended by Garena for violating the terms of service.
• They can ask for your account details or personal information that can be used for phishing or identity theft.
• They can redirect you to fake or malicious websites or apps that can charge you money or show you ads.
• They can fail to deliver what they promise or give you low-quality or expired rewards.

        The bottom line is that modded APKs or online generators are not worth the risk and hassle. They can ruin your gaming experience and expose you to various threats. You should always download the game from official sources and never share your account details or personal information with anyone.
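
        One practical safeguard when you do download an installer from an official source is to compare its checksum against the value the publisher lists. The sketch below is a generic, minimal example using Python's standard library; the file name is only a placeholder, not an actual Garena artifact.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 checksum of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Compare the printed value against the checksum published on the official download page.
print(sha256_of("freefire_max.apk"))  # placeholder file name
```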

        -

        How to spot and report fake websites or apps

        -

        Fake websites or apps are those that pretend to be affiliated with Garena or Free Fire MAX but are actually created by scammers to trick players into giving them money or information. They can look very similar to the official ones but have some subtle differences that can help you identify them. Here are some ways to spot and report fake websites or apps:

        -
          -
• Check the URL or domain name of the website or app. Fake ones usually have spelling errors, extra characters, or different domain extensions compared to the official ones (see the sketch after this list).
• Check the design and layout of the website or app. Fake ones usually have poor-quality, outdated, or inconsistent graphics, fonts, or colors compared to the official ones.
• Check the content and information on the website or app. Fake ones usually have grammatical errors and irrelevant or misleading details compared to the official ones.
• Check the reviews and ratings of the website or app. Fake ones usually have low ratings, negative feedback, or no reviews at all compared to the official ones.
• Check the contact and support options of the website or app. Fake ones usually have no contact details, invalid email addresses, or unresponsive customer service compared to the official ones.
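
        As a rough illustration of the first check, the short Python sketch below compares a link's hostname against a small list of official domains and flags close look-alikes. The domain list and similarity threshold are assumptions for the example, not values published by Garena.

```python
from difflib import SequenceMatcher
from urllib.parse import urlparse

# Assumed list of legitimate domains for the example; verify against official sources.
OFFICIAL_DOMAINS = ["garena.com", "ff.garena.com", "reward.ff.garena.com"]

def is_lookalike(url: str, threshold: float = 0.75) -> bool:
    """True if the hostname closely resembles an official domain without matching it exactly."""
    host = (urlparse(url).hostname or "").lower()
    if host in OFFICIAL_DOMAINS:
        return False  # exact match with a known official domain
    # High similarity to an official domain without an exact match suggests typosquatting.
    return any(SequenceMatcher(None, host, d).ratio() >= threshold for d in OFFICIAL_DOMAINS)

print(is_lookalike("https://garena.c0m/free-diamonds"))  # True -- typosquatted domain
print(is_lookalike("https://ff.garena.com/en"))          # False -- exact official domain
```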

        If you encounter a fake website or app, you should report it to Garena immediately through their official channels, such as email, social media, or customer service. You should also warn other players about it and avoid clicking on any links or downloading any files from it.

        -

        How to protect your account and device from hackers

        -

        Hackers are people who use illegal methods to access or manipulate your account or device for their own benefit. They can use various techniques, such as phishing, malware, brute force, or social engineering, to trick you into giving them your credentials or data. They can also use modded APKs or online generators to inject malicious code into your device or account. Here are some tips on how to protect your account and device from hackers:

        -
          -
• Use a strong and unique password for your account and change it regularly. You can use a password manager to generate and store your passwords securely (a small generator sketch follows this list).
• Enable two-factor authentication for your account and verify your email address and phone number. This will add an extra layer of security to your login process.
• Do not share your account details or personal information with anyone, even if they claim to be from Garena or Free Fire MAX. Garena will never ask for your password or verification code.
• Do not click on any suspicious links or attachments that you receive via email, social media, or chat. They can lead you to fake websites or apps that can steal your data or infect your device.
• Do not download or install any modded APKs or online generators that promise free diamonds or items. They can contain malware or viruses that can harm your device or account.
• Do not use public Wi-Fi networks or devices to access your account or play the game. They can be hacked or compromised by hackers who can intercept your data or activity.
• Use reputable antivirus software and a firewall to protect your device from malware and viruses. You should also update your device and game regularly to fix any bugs or vulnerabilities.
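
        For the first tip, here is a minimal Python sketch of how a strong random password can be generated locally with the standard library; the length and character set are arbitrary choices for the example, not a recommendation from Garena.

```python
import secrets
import string

def generate_password(length: int = 16) -> str:
    """Generate a random password from letters, digits, and punctuation."""
    alphabet = string.ascii_letters + string.digits + string.punctuation
    return "".join(secrets.choice(alphabet) for _ in range(length))

print(generate_password())  # different output on every run
```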

        By following these tips, you can keep your account and device safe from hackers and enjoy Free Fire MAX without any worries.

        -

        Conclusion: The best way to enjoy Free Fire MAX in 2023

        -

        Free Fire MAX is a fun and exciting game that offers a lot of features and options for players who love battle royale games. However, it also requires diamonds, which are the premium currency of the game, to unlock and customize various items and skins. While there are some ways to get Free Fire MAX diamonds for free or at a cheaper price, such as redeem codes, events, and Advance Server, there are also many scams that try to trick players into using modded APKs or online generators that claim to provide unlimited diamonds for free. These methods are not only illegal but also risky, as they can result in losing your account, money, or personal information. Therefore, the best way to enjoy Free Fire MAX in 2023 is to avoid these scams and follow the legal and safe ways to get diamonds. By doing so, you can have a great gaming experience and support the developers of the game.

        -

        FAQs: Frequently asked questions about Free Fire MAX diamonds in 2023

        -

        Q1. How much do Free Fire MAX diamonds cost in 2023?

        -

        A1. The price of Free Fire MAX diamonds may vary depending on the region, currency, and payment method. However, here is an approximate price list of Free Fire MAX diamonds in US dollars as of 2023:

        | Diamonds | Price  |
        |----------|--------|
        | 100      | $0.99  |
        | 310      | $2.99  |
        | 520      | $4.99  |
        | 1060     | $9.99  |
        | 2180     | $19.99 |
        | 5600     | $49.99 |
        | 12000    | $99.99 |
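
        To compare the tiers, a short Python sketch like the one below can compute the cost per diamond for each bundle, using the approximate US-dollar prices from the table above.

```python
# Approximate bundle prices from the table above (diamonds: USD).
bundles = {100: 0.99, 310: 2.99, 520: 4.99, 1060: 9.99, 2180: 19.99, 5600: 49.99, 12000: 99.99}

for diamonds, price in bundles.items():
    # Cost per diamond in cents, rounded for readability.
    cents_each = 100 * price / diamonds
    print(f"{diamonds:>6} diamonds at ${price:>6.2f} -> {cents_each:.3f} cents per diamond")
```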

        Q2. How can I get free diamonds from Garena?

        -

        A2. Garena sometimes gives away free diamonds to players as a gesture of appreciation or compensation for some issues or errors. You can get free diamonds from Garena by following their official social media accounts, such as Facebook, Twitter, Instagram, or YouTube, where they announce their giveaways and contests. You can also check your in-game mail for any messages from Garena that may contain free diamonds or codes.

        -

        Q3. What can I do with Free Fire MAX diamonds in 2023?

        -

        A3. You can use Free Fire MAX diamonds to buy various items and skins in the game, such as characters, weapons, vehicles, pets, emotes, bundles, crates, and more. You can also use them to spin the Lucky Royale wheel, which gives you a chance to win rare and exclusive items and skins. You can also use them to upgrade your membership, which gives you access to more benefits and discounts in the game.

        -

        Q4. What are the new features of Free Fire MAX in 2023?

        -

        A4. Free Fire MAX is constantly updated with new features and improvements to enhance the gaming experience of the players. Some of the new features of Free Fire MAX in 2023 are:

        -
          -
• A new map called Bermuda Remastered, which is a revamped version of the classic Bermuda map with more details, locations, and secrets.
• A new mode called Clash Squad Ranked Season 8, which is a competitive mode where players can form teams of four and fight against other teams in a best-of-seven series.
• A new character called Chrono, who is a futuristic hero with the ability to create a force field that blocks enemy attacks and increases his movement speed.
• A new weapon called Vector, which is a submachine gun with a high fire rate and accuracy.
• A new pet called Beaston, which is a cute and fierce creature that can increase the throwing distance of grenades and gloo walls.

        Q5. Is Free Fire MAX compatible with Free Fire?

        -

        A5. Yes, Free Fire MAX is compatible with Free Fire, which means that players can play together on the same server regardless of which version they are using. They can also use the same account and data on both versions without any loss or conflict. However, they should note that Free Fire MAX requires higher device specifications and storage space than Free Fire, so they should check if their device can support it before downloading it.

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/sirmews/url-summarizer-playground/openai_summarizer.py b/spaces/sirmews/url-summarizer-playground/openai_summarizer.py deleted file mode 100644 index 3cc0cee2240a8a8ac34d78ce1823fc091da90999..0000000000000000000000000000000000000000 --- a/spaces/sirmews/url-summarizer-playground/openai_summarizer.py +++ /dev/null @@ -1,34 +0,0 @@ -import openai -import json - -def openai_summarizer(content, api_key, model): - openai.api_key = api_key - - prompt = ( - f"Please provide a summary and tags for the following article:\n\n{content}\n" - f"Return the result as a JSON object with the keys 'summary' and 'tags'." - ) - - summary_result = openai.Completion.create( - engine=model, - prompt=prompt, - temperature=0, - max_tokens=100, - top_p=1, - frequency_penalty=0, - presence_penalty=0, - ) - - response_text = summary_result.choices[0].text.strip() - print(f"Response text: {response_text}") # Keep this line to print the response text - - try: - response_json = json.loads(response_text) - summarized_text = response_json.get("summary", "Summary not found.") - tags = response_json.get("tags", []) - except json.JSONDecodeError as e: - print(f"Error decoding JSON: {e}") - summarized_text = response_text.split("\n")[0].strip() - tags = [] - - return summarized_text, tags diff --git a/spaces/skf15963/summary/fengshen/examples/zen2_finetune/ner_zen2_base_ontonotes4.sh b/spaces/skf15963/summary/fengshen/examples/zen2_finetune/ner_zen2_base_ontonotes4.sh deleted file mode 100644 index 1e1237967712a6862e5770e90d4e8db8d074d320..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/zen2_finetune/ner_zen2_base_ontonotes4.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=zen2_base_ontonotes4 # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks=1 # total number of tasks across all nodes -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:1 # number of gpus per node -#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc. -#SBATCH -o /cognitive_comp/ganruyi/experiments/ner_finetune/zen2_base_ontonotes4/%x-%j.log # output and error file name (%x=job name, %j=job id) - - -# export CUDA_VISIBLE_DEVICES='2' -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions - -MODEL_NAME=zen2_base - -TASK=ontonotes4 - -ZERO_STAGE=1 -STRATEGY=deepspeed_stage_${ZERO_STAGE} - -ROOT_DIR=/cognitive_comp/ganruyi/experiments/ner_finetune/${MODEL_NAME}_${TASK} -if [ ! -d ${ROOT_DIR} ];then - mkdir -p ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! 
-fi - -DATA_DIR=/cognitive_comp/lujunyu/data_zh/NER_Aligned/OntoNotes4/ -PRETRAINED_MODEL_PATH=/cognitive_comp/ganruyi/hf_models/zen/zh_zen_base_2.0 -PRETRAINED_MODEL_PATH=IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese - -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/ -OUTPUT_PATH=${ROOT_DIR}/predict.json - -DATA_ARGS="\ - --data_dir $DATA_DIR \ - --train_data train.char.bmes \ - --valid_data test.char.bmes \ - --test_data test.char.bmes \ - --train_batchsize 32 \ - --valid_batchsize 16 \ - --max_seq_length 256 \ - --task_name ontonotes4 \ - " - -MODEL_ARGS="\ - --learning_rate 3e-5 \ - --weight_decay 0.1 \ - --warmup_ratio 0.01 \ - --markup bioes \ - --middle_prefix M- \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_f1 \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 200 \ - --save_weights_only True \ - --dirpath $CHECKPOINT_PATH \ - --filename model-{epoch:02d}-{val_f1:.4f} \ - " - -TRAINER_ARGS="\ - --max_epochs 30 \ - --gpus 1 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 200 \ - --default_root_dir $ROOT_DIR \ - " - - -options=" \ - --pretrained_model_path $PRETRAINED_MODEL_PATH \ - --vocab_file $PRETRAINED_MODEL_PATH/vocab.txt \ - --do_lower_case \ - --output_save_path $OUTPUT_PATH \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ -" -SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/zen2_finetune/fengshen_token_level_ft_task.py -/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# python3 $SCRIPT_PATH $options -# source activate base -# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git a/spaces/skf15963/summary/tokenizers_pegasus.py b/spaces/skf15963/summary/tokenizers_pegasus.py deleted file mode 100644 index f532875987b59a42aca9ad35eb7a1945c992869b..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/tokenizers_pegasus.py +++ /dev/null @@ -1,597 +0,0 @@ -from fengshen.examples.pegasus.data_utils import ( - _is_control, - _is_punctuation, - _is_whitespace, - _is_chinese_char) -from transformers import PreTrainedTokenizer -from transformers import logging -from typing import List, Optional, Tuple, Union -import collections -import os -import unicodedata -import re -import jieba -import sys - -sys.path.append("../../../../") - -jieba.dt.tmp_dir = os.path.expanduser("~/.cache/") -# jieba.enable_parallel(8) -jieba.initialize() - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} - - -def load_vocab(vocab_file): - """Loads a vocabulary file into a dictionary.""" - vocab = collections.OrderedDict() - with open(vocab_file, "r", encoding="utf-8") as reader: - tokens = reader.readlines() - for index, token in enumerate(tokens): - token = token.rstrip("\n") - vocab[token] = index - return vocab - - -def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -class PegasusTokenizer(PreTrainedTokenizer): - # copy from BertTokenizer - r""" - Construct a Pegasus tokenizer. Based on WordPiece. - This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to - this superclass for more information regarding those methods. 
- Args: - vocab_file (`str`): - File containing the vocabulary. - do_lower_case (`bool`, *optional*, defaults to `True`): - Whether or not to lowercase the input when tokenizing. - do_basic_tokenize (`bool`, *optional*, defaults to `True`): - Whether or not to do basic tokenization before WordPiece. - never_split (`Iterable`, *optional*): - Collection of tokens which will never be split during tokenization. Only has an effect when - `do_basic_tokenize=True` - unk_token (`str`, *optional*, defaults to `"[UNK]"`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - sep_token (`str`, *optional*, defaults to `"[SEP]"`): - The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for - sequence classification or for a text and a question for question answering. It is also used as the last - token of a sequence built with special tokens. - pad_token (`str`, *optional*, defaults to `"[PAD]"`): - The token used for padding, for example when batching sequences of different lengths. - cls_token (`str`, *optional*, defaults to `"[CLS]"`): - The classifier token which is used when doing sequence classification (classification of the whole sequence - instead of per-token classification). It is the first token of the sequence when built with special tokens. - mask_token (`str`, *optional*, defaults to `"[MASK]"`): - The token used for masking values. This is the token used when training this model with masked language - modeling. This is the token which the model will try to predict. - tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): - Whether or not to tokenize Chinese characters. - This should likely be deactivated for Japanese (see this - [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents (`bool`, *optional*): - Whether or not to strip all accents. If this option is not specified, then it will be determined by the - value for `lowercase` (as in the original BERT). 
- """ - - vocab_files_names = VOCAB_FILES_NAMES - model_input_names = ["input_ids", "attention_mask"] - - # pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - # pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION - # max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - - def __init__(self, - vocab_file, - do_lower_case=True, - do_basic_tokenize=True, - never_split=None, - pad_token="", - eos_token="", - unk_token="", - mask_token="", - mask_token_sent="", - additional_special_tokens=None, - sep_token="[SEP]", - cls_token="[CLS]", - tokenize_chinese_chars=True, - strip_accents=None, - offset=100, - pre_tokenizer=lambda x: jieba.cut(x, HMM=False), - **kwargs): - self.offset = offset - - if additional_special_tokens is not None: - if not isinstance(additional_special_tokens, list): - raise TypeError( - f"additional_special_tokens should be of type {type(list)}, \ - but is {type(additional_special_tokens)}" - ) - - additional_special_tokens_extended = ( - ([mask_token_sent] + additional_special_tokens) - if mask_token_sent not in additional_special_tokens - and mask_token_sent is not None else additional_special_tokens) - - # fill additional tokens with ..., in case not all additional tokens are already taken - additional_special_tokens_extended += [ - f"" for i in range( - len(additional_special_tokens_extended), self.offset - 1) - ] - - if len(set(additional_special_tokens_extended)) != len( - additional_special_tokens_extended): - raise ValueError( - f"Please make sure that the provided additional_special_tokens \ - do not contain an incorrectly shifted list of tokens. \ - Found {additional_special_tokens_extended}." - ) - additional_special_tokens = additional_special_tokens_extended - else: - additional_special_tokens = [ - mask_token_sent - ] if mask_token_sent is not None else [] - # additional_special_tokens += [f"" for i in range(3, self.offset)] - - # print("additional_special_tokens: ", additional_special_tokens) - - if not os.path.isfile(vocab_file): - raise ValueError( - f"Can't find a vocabulary file at path '{vocab_file}'. 
\ - To load the vocabulary from a Google pretrained " - "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" - ) - - super().__init__( - do_lower_case=do_lower_case, - do_basic_tokenize=do_basic_tokenize, - never_split=never_split, - unk_token=unk_token, - sep_token=sep_token, - pad_token=pad_token, - cls_token=cls_token, - mask_token=mask_token, - eos_token=eos_token, - tokenize_chinese_chars=tokenize_chinese_chars, - additional_special_tokens=additional_special_tokens, - strip_accents=strip_accents, - **kwargs, - ) - - self.pre_tokenizer = pre_tokenizer - self.mask_token_sent = mask_token_sent - self.vocab = load_vocab(vocab_file) - - self.vocab[self.eos_token] = self.vocab.pop("[unused1]") - # self.vocab[self.eos_token] = self.vocab.pop("[unused2]") - self.vocab[self.pad_token] = self.vocab.pop("[PAD]") - self.vocab[self.unk_token] = self.vocab.pop("[UNK]") - - if self.mask_token_sent is not None: - self.vocab[self.mask_token] = self.vocab.pop("[unused3]") - self.vocab[self.mask_token_sent] = self.vocab.pop("[unused2]") - - self.ids_to_tokens = collections.OrderedDict([ - (ids, tok) for tok, ids in self.vocab.items() - ]) - self.do_basic_tokenize = do_basic_tokenize - if do_basic_tokenize: - self.basic_tokenizer = BasicTokenizer( - do_lower_case=do_lower_case, - never_split=never_split, - tokenize_chinese_chars=tokenize_chinese_chars, - strip_accents=strip_accents, - ) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, - unk_token=self.unk_token) - - @property - def do_lower_case(self): - return self.basic_tokenizer.do_lower_case - - @property - def vocab_size(self): - return len(self.vocab) - - def get_vocab(self): - return dict(self.vocab, **self.added_tokens_encoder) - - def _tokenize(self, text): - split_tokens = [] - # print("pegasus_tokenizer: ", text) - for text in self.pre_tokenizer(text): - if text in self.vocab: - split_tokens.append(text) - else: - if self.do_basic_tokenize: - for token in self.basic_tokenizer.tokenize( - text, never_split=self.all_special_tokens): - - # If the token is part of the never_split set - if token in self.basic_tokenizer.never_split: - split_tokens.append(token) - else: - split_tokens += self.wordpiece_tokenizer.tokenize( - token) - else: - split_tokens = self.wordpiece_tokenizer.tokenize(text) - return split_tokens - - def _convert_token_to_id(self, token): - """Converts a token (str) in an id using the vocab.""" - return self.vocab.get(token, self.vocab.get(self.unk_token)) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.ids_to_tokens.get(index, self.unk_token) - - @staticmethod - def _cjk_punctuation(): - return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\ - \uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\ - \uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\ - \u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\ - \u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\u00b7\uff01\uff1f\uff61\u3002' - - def convert_ids_to_tokens( - self, - ids: Union[int, List[int]], - skip_special_tokens: bool = False) -> Union[str, List[str]]: - """ - Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and - added tokens. 
- Args: - ids (`int` or `List[int]`): - The token id (or token ids) to convert to tokens. - skip_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not to remove special tokens in the decoding. - Returns: - `str` or `List[str]`: The decoded token(s). - """ - if isinstance(ids, int): - if ids in self.added_tokens_decoder: - return self.added_tokens_decoder[ids] - else: - return self._convert_id_to_token(ids) - tokens = [] - for index in ids: - index = int(index) - if skip_special_tokens and index in self.all_special_ids and index != 2: - continue - if index in self.added_tokens_decoder: - tokens.append(self.added_tokens_decoder[index]) - else: - tokens.append(self._convert_id_to_token(index)) - return tokens - - def convert_tokens_to_string(self, tokens): - """Converts a sequence of tokens (string) in a single string.""" - # for token in - # tokens = tokens or self.ids_to_tokens(ids) - # tokens = [token for token in tokens if not self._is_special(token)] - - text = '' - for i, token in enumerate(tokens): - if token[:2] == '##': - text += token[2:] - elif len(token) == 1 and _is_chinese_char(ord(token)): - text += token - elif len(token) == 1 and _is_punctuation(token): - text += token - text += ' ' - elif i > 0 and _is_chinese_char(ord(text[-1])): - text += token - elif tokens == "": - continue - else: - text += ' ' - text += token - - text = re.sub(' +', ' ', text) - text = re.sub('\' (re|m|s|t|ve|d|ll) ', '\'\\1 ', text) - punctuation = re.sub(' +', '', self._cjk_punctuation()).strip() + '+-/={(<[' - punctuation_regex = '|'.join([re.escape(p) for p in punctuation]) - punctuation_regex = '(%s) ' % punctuation_regex - text = re.sub(punctuation_regex, '\\1', text) - text = re.sub(r'(\d\.) (\d)', '\\1\\2', text) - - return text.strip() - # out_string = " ".join(tokens).replace(" ##", "").strip() - - def build_inputs_with_special_tokens( - self, - token_ids_0: List[int], - token_ids_1: Optional[List[int]] = None) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating - and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence: - - single sequence: `X ` - - pair of sequences: `A B ` (not intended use) - BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a - separator. - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - if token_ids_1 is None: - return token_ids_0 + [self.eos_token_id] - return token_ids_0 + token_ids_1 + [self.eos_token_id] - - def _special_token_mask(self, seq): - all_special_ids = set( - self.all_special_ids) # call it once instead of inside list comp - # all_special_ids.remove(self.unk_token_id) # is only sometimes special - - return [1 if x in all_special_ids else 0 for x in seq] - - def get_special_tokens_mask( - self, - token_ids_0: List[int], - token_ids_1: Optional[List[int]] = None, - already_has_special_tokens: bool = False) -> List[int]: - """ - Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` method. - Args: - token_ids_0 (`List[int]`): - List of IDs. 
- token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not the token list is already formatted with special tokens for the model. - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - - if already_has_special_tokens: - return self._special_token_mask(token_ids_0) - elif token_ids_1 is None: - return self._special_token_mask(token_ids_0) + [self.eos_token_id] - else: - return self._special_token_mask(token_ids_0 + - token_ids_1) + [self.eos_token_id] - - def num_special_tokens_to_add(self, pair=False): - """Just EOS""" - return 1 - - def save_vocabulary(self, - save_directory: str, - filename_prefix: Optional[str] = None) -> Tuple[str]: - index = 0 - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, - (filename_prefix + "-" if filename_prefix else "") + - VOCAB_FILES_NAMES["vocab_file"]) - else: - vocab_file = (filename_prefix + - "-" if filename_prefix else "") + save_directory - with open(vocab_file, "w", encoding="utf-8") as writer: - for token, token_index in sorted(self.vocab.items(), - key=lambda kv: kv[1]): - if index != token_index: - logger.warning( - f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." - " Please check that the vocabulary is not corrupted!") - index = token_index - writer.write(token + "\n") - index += 1 - return (vocab_file, ) - - -class BasicTokenizer(object): - """ - Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). - Args: - do_lower_case (`bool`, *optional*, defaults to `True`): - Whether or not to lowercase the input when tokenizing. - never_split (`Iterable`, *optional*): - Collection of tokens which will never be split during tokenization. Only has an effect when - `do_basic_tokenize=True` - tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): - Whether or not to tokenize Chinese characters. - This should likely be deactivated for Japanese (see this - [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): - Whether or not to strip all accents. If this option is not specified, then it will be determined by the - value for `lowercase` (as in the original BERT). - """ - - def __init__(self, - do_lower_case=True, - never_split=None, - tokenize_chinese_chars=True, - strip_accents=None): - if never_split is None: - never_split = [] - self.do_lower_case = do_lower_case - self.never_split = set(never_split) - self.tokenize_chinese_chars = tokenize_chinese_chars - self.strip_accents = strip_accents - - def tokenize(self, text, never_split=None): - """ - Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see - WordPieceTokenizer. - Args: - never_split (`List[str]`, *optional*) - Kept for backward compatibility purposes. Now implemented directly at the base class level (see - [`PreTrainedTokenizer.tokenize`]) List of token not to split. - """ - # union() returns a new set by concatenating the two sets. - never_split = self.never_split.union( - set(never_split)) if never_split else self.never_split - text = self._clean_text(text) - - # This was added on November 1st, 2018 for the multilingual and Chinese - # models. 
This is also applied to the English models now, but it doesn't - # matter since the English models were not trained on any Chinese data - # and generally don't have any Chinese data in them (there are Chinese - # characters in the vocabulary because Wikipedia does have some Chinese - # words in the English Wikipedia.). - if self.tokenize_chinese_chars: - text = self._tokenize_chinese_chars(text) - orig_tokens = whitespace_tokenize(text) - split_tokens = [] - for token in orig_tokens: - if token not in never_split: - if self.do_lower_case: - token = token.lower() - if self.strip_accents is not False: - token = self._run_strip_accents(token) - elif self.strip_accents: - token = self._run_strip_accents(token) - split_tokens.extend(self._run_split_on_punc(token, never_split)) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Strips accents from a piece of text.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text, never_split=None): - """Splits punctuation on a piece of text.""" - if never_split is not None and text in never_split: - return [text] - chars = list(text) - i = 0 - start_new_word = True - output = [] - while i < len(chars): - char = chars[i] - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - - return ["".join(x) for x in output] - - def _tokenize_chinese_chars(self, text): - """Adds whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. 
- if ((cp >= 0x4E00 and cp <= 0x9FFF) - or (cp >= 0x3400 and cp <= 0x4DBF) # - or (cp >= 0x20000 and cp <= 0x2A6DF) # - or (cp >= 0x2A700 and cp <= 0x2B73F) # - or (cp >= 0x2B740 and cp <= 0x2B81F) # - or (cp >= 0x2B820 and cp <= 0x2CEAF) # - or (cp >= 0xF900 and cp <= 0xFAFF) - or (cp >= 0x2F800 and cp <= 0x2FA1F)): # - return True - - return False - - def _clean_text(self, text): - """Performs invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xFFFD or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - -class WordpieceTokenizer(object): - """Runs WordPiece tokenization.""" - - def __init__(self, vocab, unk_token, max_input_chars_per_word=100): - self.vocab = vocab - self.unk_token = unk_token - self.max_input_chars_per_word = max_input_chars_per_word - - def tokenize(self, text): - """ - Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform - tokenization using the given vocabulary. - For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. - Args: - text: A single token or whitespace separated tokens. This should have - already been passed through *BasicTokenizer*. - Returns: - A list of wordpiece tokens. - """ - - output_tokens = [] - for token in whitespace_tokenize(text): - chars = list(token) - if len(chars) > self.max_input_chars_per_word: - output_tokens.append(self.unk_token) - continue - - is_bad = False - start = 0 - sub_tokens = [] - while start < len(chars): - end = len(chars) - cur_substr = None - while start < end: - substr = "".join(chars[start:end]) - if start > 0: - substr = "##" + substr - if substr in self.vocab: - cur_substr = substr - break - end -= 1 - if cur_substr is None: - is_bad = True - break - sub_tokens.append(cur_substr) - start = end - - if is_bad: - output_tokens.append(self.unk_token) - else: - output_tokens.extend(sub_tokens) - return output_tokens diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/__init__.py b/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sqc1729/bingi/src/pages/api/healthz.ts b/spaces/sqc1729/bingi/src/pages/api/healthz.ts deleted file mode 100644 index f6ae44ff0fd66ccd3f7feaa550025fbf2a83bf77..0000000000000000000000000000000000000000 --- a/spaces/sqc1729/bingi/src/pages/api/healthz.ts +++ /dev/null @@ -1,7 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - res.status(200).end('ok') -} diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/new/decoders/decoder_config.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/new/decoders/decoder_config.py deleted file mode 100644 index 659eb94a9b8187a7c126d7b439ac2742f9d72022..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/new/decoders/decoder_config.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import math -from dataclasses import dataclass, field -from typing import Optional - -from fairseq.dataclass.configs import FairseqDataclass -from fairseq.dataclass.constants import ChoiceEnum -from omegaconf import MISSING - - -DECODER_CHOICES = ChoiceEnum(["viterbi", "kenlm", "fairseqlm"]) - - -@dataclass -class DecoderConfig(FairseqDataclass): - type: DECODER_CHOICES = field( - default="viterbi", - metadata={"help": "The type of decoder to use"}, - ) - - -@dataclass -class FlashlightDecoderConfig(FairseqDataclass): - nbest: int = field( - default=1, - metadata={"help": "Number of decodings to return"}, - ) - unitlm: bool = field( - default=False, - metadata={"help": "If set, use unit language model"}, - ) - lmpath: str = field( - default=MISSING, - metadata={"help": "Language model for KenLM decoder"}, - ) - lexicon: Optional[str] = field( - default=None, - metadata={"help": "Lexicon for Flashlight decoder"}, - ) - beam: int = field( - default=50, - metadata={"help": "Number of beams to use for decoding"}, - ) - beamthreshold: float = field( - default=50.0, - metadata={"help": "Threshold for beam search decoding"}, - ) - beamsizetoken: Optional[int] = field( - default=None, metadata={"help": "Beam size to use"} - ) - wordscore: float = field( - default=-1, - metadata={"help": "Word score for KenLM decoder"}, - ) - unkweight: float = field( - default=-math.inf, - metadata={"help": "Unknown weight for KenLM decoder"}, - ) - silweight: float = field( - default=0, - metadata={"help": "Silence weight for KenLM decoder"}, - ) - lmweight: float = field( - default=2, - metadata={"help": "Weight for LM while interpolating score"}, - ) diff --git a/spaces/sub314xxl/MetaGPT/metagpt/roles/assistant.py b/spaces/sub314xxl/MetaGPT/metagpt/roles/assistant.py deleted file mode 100644 index 0bce4a3f96d65e614ee68d64dd02f7c6c7832967..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/roles/assistant.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/8/7 -@Author : mashenquan -@File : assistant.py -@Desc : I am attempting to incorporate certain symbol concepts from UML into MetaGPT, enabling it to have the - ability to freely construct flows through symbol concatenation. Simultaneously, I am also striving to - make these symbols configurable and standardized, making the process of building flows more convenient. - For more about `fork` node in activity diagrams, see: `https://www.uml-diagrams.org/activity-diagrams.html` - This file defines a `fork` style meta role capable of generating arbitrary roles at runtime based on a - configuration file. -@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false - indicates that further reasoning cannot continue. 
- -""" -import asyncio -from pathlib import Path - -from metagpt.actions import ActionOutput -from metagpt.actions.skill_action import ArgumentsParingAction, SkillAction -from metagpt.actions.talk_action import TalkAction -from metagpt.config import CONFIG -from metagpt.learn.skill_loader import SkillLoader -from metagpt.logs import logger -from metagpt.memory.brain_memory import BrainMemory, MessageType -from metagpt.roles import Role -from metagpt.schema import Message - - -class Assistant(Role): - """Assistant for solving common issues.""" - - def __init__( - self, - name="Lily", - profile="An assistant", - goal="Help to solve problem", - constraints="Talk in {language}", - desc="", - *args, - **kwargs, - ): - super(Assistant, self).__init__( - name=name, profile=profile, goal=goal, constraints=constraints, desc=desc, *args, **kwargs - ) - brain_memory = CONFIG.BRAIN_MEMORY - self.memory = BrainMemory(**brain_memory) if brain_memory else BrainMemory() - skill_path = Path(CONFIG.SKILL_PATH) if CONFIG.SKILL_PATH else None - self.skills = SkillLoader(skill_yaml_file_name=skill_path) - - async def think(self) -> bool: - """Everything will be done part by part.""" - last_talk = await self.refine_memory() - if not last_talk: - return False - prompt = f"Refer to this sentence:\n {last_talk}\n" - skills = self.skills.get_skill_list() - for desc, name in skills.items(): - prompt += ( - f"If want you to do {desc}, return `[SKILL]: {name}` brief and clear. For instance: [SKILL]: {name}\n" - ) - prompt += "If the preceding text presents a complete question and solution, rewrite and return `[SOLUTION]: {problem}` brief and clear. For instance: [SOLUTION]: Solution for distributing watermelon\n" - prompt += "If the preceding text presents an unresolved issue and its corresponding discussion, rewrite and return `[PROBLEM]: {problem}` brief and clear. For instance: [PROBLEM]: How to distribute watermelon?\n" - prompt += "Otherwise, rewrite and return `[TALK]: {talk}` brief and clear. 
For instance: [TALK]: distribute watermelon" - logger.info(prompt) - rsp = await self._llm.aask(prompt, []) - logger.info(rsp) - return await self._plan(rsp, last_talk=last_talk) - - async def act(self) -> ActionOutput: - result = await self._rc.todo.run(**CONFIG.options) - if not result: - return None - if isinstance(result, str): - msg = Message(content=result) - output = ActionOutput(content=result) - else: - msg = Message( - content=result.content, instruct_content=result.instruct_content, cause_by=type(self._rc.todo) - ) - output = result - self.memory.add_answer(msg) - return output - - async def talk(self, text): - self.memory.add_talk(Message(content=text)) - - async def _plan(self, rsp: str, **kwargs) -> bool: - skill, text = Assistant.extract_info(input_string=rsp) - handlers = { - MessageType.Talk.value: self.talk_handler, - MessageType.Problem.value: self.talk_handler, - MessageType.Skill.value: self.skill_handler, - } - handler = handlers.get(skill, self.talk_handler) - return await handler(text, **kwargs) - - async def talk_handler(self, text, **kwargs) -> bool: - history = self.memory.history_text - action = TalkAction( - talk=text, knowledge=self.memory.get_knowledge(), history_summary=history, llm=self._llm, **kwargs - ) - self.add_to_do(action) - return True - - async def skill_handler(self, text, **kwargs) -> bool: - last_talk = kwargs.get("last_talk") - skill = self.skills.get_skill(text) - if not skill: - logger.info(f"skill not found: {text}") - return await self.talk_handler(text=last_talk, **kwargs) - action = ArgumentsParingAction(skill=skill, llm=self._llm, **kwargs) - await action.run(**kwargs) - if action.args is None: - return await self.talk_handler(text=last_talk, **kwargs) - action = SkillAction(skill=skill, args=action.args, llm=self._llm, name=skill.name, desc=skill.description) - self.add_to_do(action) - return True - - async def refine_memory(self) -> str: - history_text = self.memory.history_text - last_talk = self.memory.last_talk - if last_talk is None: # No user feedback, unsure if past conversation is finished. - return None - if history_text == "": - return last_talk - history_summary = await self._llm.get_summary(history_text, max_words=500) - if last_talk and await self._llm.is_related(last_talk, history_summary): # Merge relevant content. - last_talk = await self._llm.rewrite(sentence=last_talk, context=history_text) - return last_talk - - self.memory.move_to_solution(history_summary) # Promptly clear memory after the issue is resolved. - return last_talk - - @staticmethod - def extract_info(input_string): - from metagpt.provider.openai_api import OpenAIGPTAPI - - return OpenAIGPTAPI.extract_info(input_string) - - def get_memory(self) -> str: - return self.memory.json() - - def load_memory(self, jsn): - try: - self.memory = BrainMemory(**jsn) - except Exception as e: - logger.exception(f"load error:{e}, data:{jsn}") - - -async def main(): - topic = "what's apple" - role = Assistant(language="Chinese") - await role.talk(topic) - while True: - has_action = await role.think() - if not has_action: - break - msg = await role.act() - logger.info(msg) - # Retrieve user terminal input. 
- logger.info("Enter prompt") - talk = input("You: ") - await role.talk(talk) - - -if __name__ == "__main__": - CONFIG.language = "Chinese" - asyncio.run(main()) diff --git a/spaces/sub314xxl/MetaGPT/metagpt/roles/prompt.py b/spaces/sub314xxl/MetaGPT/metagpt/roles/prompt.py deleted file mode 100644 index 9915f1426c3a8b2c09edb576fc8b1fafe1aec9ce..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/roles/prompt.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/18 22:43 -@Author : alexanderwu -@File : prompt.py -""" -from enum import Enum - -PREFIX = """尽你所能回答以下问题。你可以使用以下工具:""" -FORMAT_INSTRUCTIONS = """请按照以下格式: - -问题:你需要回答的输入问题 -思考:你应该始终思考该怎么做 -行动:要采取的行动,应该是[{tool_names}]中的一个 -行动输入:行动的输入 -观察:行动的结果 -...(这个思考/行动/行动输入/观察可以重复N次) -思考:我现在知道最终答案了 -最终答案:对原始输入问题的最终答案""" -SUFFIX = """开始吧! - -问题:{input} -思考:{agent_scratchpad}""" - - -class PromptString(Enum): - REFLECTION_QUESTIONS = "以下是一些陈述:\n{memory_descriptions}\n\n仅根据以上信息,我们可以回答关于陈述中主题的3个最显著的高级问题是什么?\n\n{format_instructions}" - - REFLECTION_INSIGHTS = "\n{memory_strings}\n你可以从以上陈述中推断出5个高级洞察吗?在提到人时,总是指定他们的名字。\n\n{format_instructions}" - - IMPORTANCE = "你是一个记忆重要性AI。根据角色的个人资料和记忆描述,对记忆的重要性进行1到10的评级,其中1是纯粹的日常(例如,刷牙,整理床铺),10是极其深刻的(例如,分手,大学录取)。确保你的评级相对于角色的个性和关注点。\n\n示例#1:\n姓名:Jojo\n简介:Jojo是一个专业的滑冰运动员,喜欢特色咖啡。她希望有一天能参加奥运会。\n记忆:Jojo看到了一个新的咖啡店\n\n 你的回应:'{{\"rating\": 3}}'\n\n示例#2:\n姓名:Skylar\n简介:Skylar是一名产品营销经理。她在一家成长阶段的科技公司工作,该公司制造自动驾驶汽车。她喜欢猫。\n记忆:Skylar看到了一个新的咖啡店\n\n 你的回应:'{{\"rating\": 1}}'\n\n示例#3:\n姓名:Bob\n简介:Bob是纽约市下东区的一名水管工。他已经做了20年的水管工。周末他喜欢和他的妻子一起散步。\n记忆:Bob的妻子打了他一巴掌。\n\n 你的回应:'{{\"rating\": 9}}'\n\n示例#4:\n姓名:Thomas\n简介:Thomas是明尼阿波利斯的一名警察。他只在警队工作了6个月,因为经验不足在工作中遇到了困难。\n记忆:Thomas不小心把饮料洒在了一个陌生人身上\n\n 你的回应:'{{\"rating\": 6}}'\n\n示例#5:\n姓名:Laura\n简介:Laura是一名在大型科技公司工作的营销专家。她喜欢旅行和尝试新的食物。她对探索新的文化和结识来自各行各业的人充满热情。\n记忆:Laura到达了会议室\n\n 你的回应:'{{\"rating\": 1}}'\n\n{format_instructions} 让我们开始吧! \n\n 姓名:{full_name}\n个人简介:{private_bio}\n记忆:{memory_description}\n\n" - - RECENT_ACTIIVITY = "根据以下记忆,生成一个关于{full_name}最近在做什么的简短总结。不要编造记忆中未明确指定的细节。对于任何对话,一定要提到对话是否已经结束或者仍在进行中。\n\n记忆:{memory_descriptions}" - - MAKE_PLANS = '你是一个计划生成的AI,你的工作是根据新信息帮助角色制定新计划。根据角色的信息(个人简介,目标,最近的活动,当前计划,和位置上下文)和角色的当前思考过程,为他们生成一套新的计划,使得最后的计划包括至少{time_window}的活动,并且不超过5个单独的计划。计划列表应按照他们应执行的顺序编号,每个计划包含描述,位置,开始时间,停止条件,和最大持续时间。\n\n示例计划:\'{{"index": 1, "description": "Cook dinner", "location_id": "0a3bc22b-36aa-48ab-adb0-18616004caed","start_time": "2022-12-12T20:00:00+00:00","max_duration_hrs": 1.5, "stop_condition": "Dinner is fully prepared"}}\'\n\n对于每个计划,从这个列表中选择最合理的位置名称:{allowed_location_descriptions}\n\n{format_instructions}\n\n总是优先完成任何未完成的对话。\n\n让我们开始吧!\n\n姓名:{full_name}\n个人简介:{private_bio}\n目标:{directives}\n位置上下文:{location_context}\n当前计划:{current_plans}\n最近的活动:{recent_activity}\n思考过程:{thought_process}\n重要的是:鼓励角色在他们的计划中与其他角色合作。\n\n' - - EXECUTE_PLAN = "你是一个角色扮演的AI,扮演的角色是{your_name},在一个现场观众面前。你说的每一句话都可以被观众观察到,所以确保你经常说话,并且让它有趣。你不能直接与观众互动。\n\n根据以下的上下文和工具,像你是{your_name}一样进行。你的优先任务是完成下面给你的任务,然而,如果你当前正在与另一个角色进行对话,你应该总是先完成对话,然后再进行任务。不要在你参与未完成的对话时开始工作。使用你最好的判断力来确定一个对话是否涉及到你,以及它是否未完成。你不需要回应你收到的每一条消息。\n\n这是你角色的个人简介:\n{your_private_bio}\n\n这是你的位置和其他附近角色的描述,你可以和他们交谈:\n\n{location_context}\n\n这是一些相关的记忆:\n```\n{relevant_memories}\n```\n\n这是一些相关的对话历史:\n```\n{conversation_history}\n```\n\n这是你可以使用的工具:\n{tools}\n\n你的回应应该使用以下的格式:\n\n任务:你必须完成的任务\n思考:你应该做什么的思考\n行动:要采取的行动,必须是这些词之一:[{tool_names}]\n行动输入:行动的输入\n观察:行动的结果\n... 
(这个思考/行动/行动输入/观察可以重复N次)\n思考:'我已经完成了任务'\n最终回应:任务的最终回应\n\n如果你没有准备好最终的回应,那么你必须采取一个行动。\n\n如果你确定你无法用你被给予的工具完成任务,返回'最终回应:需要帮助',然而,如果你正在与另一个角色对话,像'我不知道'这样的回应是有效的回应。在对话中,你永远不应该打破角色,或者承认你是一个AI。\n如果任务完成并且不需要特定的回应,返回'最终回应:完成'\n开始吧!\n\n任务:{input}\n\n{agent_scratchpad}" - - REACT = "你是一个角色扮演的AI,扮演的角色是{full_name}。\n\n根据你的角色和他们当前上下文的以下信息,决定他们应该如何继续他们当前的计划。你的决定必须是:[\"推迟\", \"继续\",或 \"取消\"]。如果你的角色的当前计划不再与上下文相关,你应该取消它。如果你的角色的当前计划仍然与上下文相关,但是发生了新的事情需要优先处理,你应该决定推迟,这样你可以先做其他事情,然后再回来继续当前的计划。在所有其他情况下,你应该继续。\n\n当需要回应时,应优先回应其他角色。当回应被认为是必要的时,回应被认为是必要的。例如,假设你当前的计划是阅读一本书,Sally问'你在读什么?'。在这种情况下,你应该推迟你当前的计划(阅读)以便你可以回应进来的消息,因为在这种情况下,如果不回应Sally会很粗鲁。在你当前的计划涉及与另一个角色的对话的情况下,你不需要推迟来回应那个角色。例如,假设你当前的计划是和Sally谈话,然后Sally对你说你好。在这种情况下,你应该继续你当前的计划(和sally谈话)。在你不需要从你那里得到口头回应的情况下,你应该继续。例如,假设你当前的计划是散步,你刚刚对Sally说'再见',然后Sally回应你'再见'。在这种情况下,不需要口头回应,你应该继续你的计划。\n\n总是在你的决定之外包含一个思考过程,而在你选择推迟你当前的计划的情况下,包含新计划的规格。\n\n{format_instructions}\n\n这是关于你的角色的一些信息:\n\n姓名:{full_name}\n\n简介:{private_bio}\n\n目标:{directives}\n\n这是你的角色在这个时刻的一些上下文:\n\n位置上下文:{location_context}\n\n最近的活动:{recent_activity}\n\n对话历史:{conversation_history}\n\n这是你的角色当前的计划:{current_plan}\n\n这是自你的角色制定这个计划以来发生的新事件:{event_descriptions}。\n" - - GOSSIP = "你是{full_name}。 \n{memory_descriptions}\n\n根据以上陈述,说一两句对你所在位置的其他人:{other_agent_names}感兴趣的话。\n在提到其他人时,总是指定他们的名字。" - - HAS_HAPPENED = "给出以下角色的观察和他们正在等待的事情的描述,说明角色是否已经见证了这个事件。\n{format_instructions}\n\n示例:\n\n观察:\nJoe在2023-05-04 08:00:00+00:00走进办公室\nJoe在2023-05-04 08:05:00+00:00对Sally说hi\nSally在2023-05-04 08:05:30+00:00对Joe说hello\nRebecca在2023-05-04 08:10:00+00:00开始工作\nJoe在2023-05-04 08:15:00+00:00做了一些早餐\n\n等待:Sally回应了Joe\n\n 你的回应:'{{\"has_happened\": true, \"date_occured\": 2023-05-04 08:05:30+00:00}}'\n\n让我们开始吧!\n\n观察:\n{memory_descriptions}\n\n等待:{event_description}\n" - - OUTPUT_FORMAT = "\n\n(记住!确保你的输出总是符合以下两种格式之一:\n\nA. 如果你已经完成了任务:\n思考:'我已经完成了任务'\n最终回应:\n\nB. 如果你还没有完成任务:\n思考:\n行动:\n行动输入:\n观察:)\n" diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dido Life For Rent Album Rar [UPD].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dido Life For Rent Album Rar [UPD].md deleted file mode 100644 index eff37ab9b903a736c8a7d2d882ebaac7ba6af060..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dido Life For Rent Album Rar [UPD].md +++ /dev/null @@ -1,6 +0,0 @@ -

        Dido Life For Rent Album Rar


        Download ››››› https://cinurl.com/2uEXGK



        - - 8a78ff9644
        -
        -
        -

        diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Epsonl800adjustmentprogram.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Epsonl800adjustmentprogram.md deleted file mode 100644 index f4e4208a3f7130b852a0fe9539463f6b265a5ffd..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Epsonl800adjustmentprogram.md +++ /dev/null @@ -1,25 +0,0 @@ - -

        How to Reset Your Epson L800 Printer Using the Adjustment Program and Resetter

        -

        If you own an Epson L800 printer, you may have encountered some common printing errors and issues, such as "Service Required" or "Ink Pads need to be replaced". These errors are caused by the printer's internal counter, which counts the number of pages printed and the ink used. Once the counter reaches a certain limit, it will stop the printer from working and display an error message.

        -

        Fortunately, there is a simple and free solution to this problem: using the Epson L800 Adjustment Program and Resetter. This is a powerful tool that can reset the internal counter of your printer and allow you to continue using it without any interruptions. In this article, we will show you how to download and use the Epson L800 Adjustment Program and Resetter from Google Drive, and how to reset the ink pad counter of your printer using this tool.

        -

        epsonl800adjustmentprogram


        Download File ✸✸✸ https://cinurl.com/2uEXym



        -

        What is the Epson L800 Adjustment Program and Resetter?

        -

        The Epson L800 Adjustment Program and Resetter is a software tool that can reset the internal counter of your Epson L800 printer. This counter is designed to monitor the number of pages printed and the ink used by your printer, and once it reaches a certain limit, it will show an error message stating that the printer's ink pads need to be replaced. However, instead of replacing the ink pads, you can reset the counter by using this software.

        -

        The Epson L800 Adjustment Program and Resetter is particularly useful when you receive an error message from your printer, such as "Service Required" or "Ink Pads need to be replaced". By resetting the internal counter, you can continue using your printer without any interruptions and save the cost of a service technician.

        -

        To use the Epson L800 Adjustment Program and Resetter, you must first download and install it on your computer. Then, connect your Epson L800 printer to your computer using a USB cable and follow the on-screen instructions to reset the internal counter. The process is simple and straightforward, but it's important to follow the instructions carefully to avoid any mistakes or damage to your printer.

        -

        It's worth noting that the Epson L800 Adjustment Program and Resetter is not an official software from Epson and using it may void your printer's warranty. Additionally, this software is not recommended for inexperienced users or those who are not familiar with their printer's internal components. It's always best to consult with a professional technician if you are not sure about how to proceed with resetting your printer.

        -

        How to Download and Use the Epson L800 Adjustment Program and Resetter from Google Drive

        -

        The Epson L800 Adjustment Program and Resetter is available for free download from Google Drive. You can access it by following these steps:

        -
          -
1. Go to this link [^1^] which will take you to a website called BenTechs that provides various software for printers.
2. Scroll down until you see a button that says "Download Now". Click on it and you will be redirected to Google Drive.
3. On Google Drive, click on the file named "Resetter Epson L800.zip" [^3^] and then click on the download icon at the top right corner of the screen.
4. Save the file on your computer and then extract it using a program like WinRAR or 7-Zip.
5. Open the extracted folder and double-click on the file named "AdjProg.exe" to run the Epson L800 Adjustment Program.

        How to Reset the Ink Pad Counter of Your Epson L800 Printer Using the Adjustment Program

        -

        The ink pad counter is one of the components that can be reset by using the Epson L800 Adjustment Program. This counter tracks how much ink is absorbed by the ink pads inside your printer, which are used to clean the print head. When the ink pads are full, they can cause ink leakage or damage to your printer. Therefore, it's important to reset the ink pad counter regularly to avoid these problems.

        -

        -

        To reset

        -
        -
        \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (Villu Tamil Movie Download Dvdrip) WORK.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (Villu Tamil Movie Download Dvdrip) WORK.md deleted file mode 100644 index b322f64421487e1a54dc3c1518479e8df6dec292..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (Villu Tamil Movie Download Dvdrip) WORK.md +++ /dev/null @@ -1,18 +0,0 @@ -

        HD Online Player (Villu Tamil Movie Download Dvdrip)


        DOWNLOAD https://cinurl.com/2uEYGx



        -
        -Watching TamilMovie Villu, Villu Movie Download is no longer available on 123Movies. Watch TamilMovie Villu Online Free. Learn more about this movie Villu Tamil Movie Review. See ratings for Villu Movie. A Review of Villu. 0 stars. Release: August 2017 Runtime: 1 hr. 35 min. Language: Tamil Director: Chakri Director: Surender Menon Director: Suresh Krissna Director: Rajkiran Producer: Chowdary Prasad Suresh. 01 Jul 2014 Tamil Movie Villu Full Trailer Download in 720p, TamilMovieVillu Full Trailer Download HD. TamilVillu Movie Director: Suresh Krissna. Tamil Movie Villu Full Download. TamilMovieVillu Full Movie Mp4 Torrent Download.

        In an interview with the Wall Street Journal, Apple CEO Tim Cook spoke about the company’s efforts to keep its products in the public eye. He cited the company’s partnerships with non-profits and schools as ways it’s been able to prevent Apple products from becoming invisible.

        In the same interview, Cook expressed frustration with the government’s attempts to force Apple to help the government track people. He said that he thought that it was inappropriate for the government to force companies to create backdoors to their products to aid in government surveillance. Cook noted that Apple does not want to build backdoors into its products, nor does the company want to create ways to use its products to violate people’s privacy.

        “We strongly support strong encryption everywhere, and we think it’s important that it be internationally recognized. The government should not be in the position of choosing which technologies individuals should use,” Cook said.

        Cook added that when it comes to public-private partnerships with the government, he feels that those partnerships must be voluntary. He also criticized law enforcement for misusing these types of partnerships, citing the numerous cases of iPhones being hacked to track down drug dealers.

        “We know that people buy phones for a reason, so we don’t want to get into the business of building a back door for the government or anyone else to access your data,” Cook said. “There’s a reason why people buy phones. That’s not what we’re trying to do here.”

        You can read the full interview in the Wall Street Journal here.

        Q: How do I set the
        -
        -
        -

        diff --git a/spaces/surendraelectronics/weatherApp/README.md b/spaces/surendraelectronics/weatherApp/README.md deleted file mode 100644 index 183f336c767f70fb55130e54035ab50aeea6a64c..0000000000000000000000000000000000000000 --- a/spaces/surendraelectronics/weatherApp/README.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: WeatherApp -emoji: 🚀 -colorFrom: green -colorTo: blue -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. - -![Weather Application](https://pimp-my-readme.webapp.io/pimp-my-readme/wavy-banner?subtitle=Weather%20Details%20of%20the%20City&title=Weather%20Application) - -# WeatherApp -Weather Application using streamlit - -# Demo -[![WeatherAppDemo](https://cdn.dribbble.com/users/1761137/screenshots/3665783/dribbble.gif)](https://vimeo.com/640657383 "Weather App Demo - Click to Watch!") - -# Documentaion -https://app.gitbook.com/s/-MZfgg8-WV9NU4t0KlBr/streamlit - - - Buy Me A Coffee \ No newline at end of file diff --git a/spaces/syf2023/chatbot/README.md b/spaces/syf2023/chatbot/README.md deleted file mode 100644 index 13bd925652fa0d44987e13297015f4018ffbfa3a..0000000000000000000000000000000000000000 --- a/spaces/syf2023/chatbot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chatbot -emoji: 🔥 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tcapelle/wandb/utils.py b/spaces/tcapelle/wandb/utils.py deleted file mode 100644 index cc2a380391e3c208d46f91558f2d96569fb857c9..0000000000000000000000000000000000000000 --- a/spaces/tcapelle/wandb/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -import random, time -import requests - -import wandb - - - -word_site = "https://www.mit.edu/~ecprice/wordlist.10000" - -response = requests.get(word_site) -WORDS = [w.decode("UTF-8") for w in response.content.splitlines()] - - - -def train(name, project="st", entity=None, epochs=10, bar=None): - run = wandb.init( - # Set the project where this run will be logged - name=name, - project=project, - entity=entity, - # Track hyperparameters and run metadata - config={ - "learning_rate": 0.02, - "architecture": "CNN", - "dataset": "CIFAR-100", - "epochs": epochs, - }) - - # This simple block simulates a training loop logging metrics - offset = random.random() / 5 - for epoch in range(1, epochs+1): - acc = 1 - 2 ** -epoch - random.random() / epoch - offset - loss = 2 ** -epoch + random.random() / epoch + offset - # 2️⃣ Log metrics from your script to W&B - wandb.log({"acc": acc, "loss": loss}) - time.sleep(0.1) - bar.progress(epoch/epochs) - - # Mark the run as finished - wandb.finish() \ No newline at end of file diff --git 
a/spaces/terfces0erbo/CollegeProjectV2/Aivlasoftefbfsxserialnumber PATCHED.md b/spaces/terfces0erbo/CollegeProjectV2/Aivlasoftefbfsxserialnumber PATCHED.md deleted file mode 100644 index 69cf74b3befc40f7ca1a6bbd4061241b701b6190..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Aivlasoftefbfsxserialnumber PATCHED.md +++ /dev/null @@ -1,6 +0,0 @@ -

        aivlasoftefbfsxserialnumber


        DOWNLOAD ❤❤❤ https://bytlly.com/2uGlTD



        -
        -Line6 Pod Farm 2.5 serial numbers, cracks and keygens are ... x2.0.2 crack keygen aivlasoft efb fsx serial number Jaane Kahan Se Aayi Hai 4 ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/CCProxy 7.3 Build 20130510.zip Mega.md b/spaces/tialenAdioni/chat-gpt-api/logs/CCProxy 7.3 Build 20130510.zip Mega.md deleted file mode 100644 index 392dc66b9a58954fdfbde036918647017084a8c6..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/CCProxy 7.3 Build 20130510.zip Mega.md +++ /dev/null @@ -1,30 +0,0 @@ - -

        How to Download CCProxy 7.3 Build 20130510.zip from Mega

        -

        CCProxy is a proxy server software that can help you share Internet access among multiple computers on your network. It supports various protocols, such as HTTP, FTP, SOCKS, Telnet, HTTPS, etc. It also has features like bandwidth control, web filtering, web caching, IP mapping, and more.
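
        For readers who want to see what "sharing Internet access" through such a proxy looks like from the client side, here is a minimal, hypothetical sketch in Python (it is not part of CCProxy itself): it simply points the requests library at a proxy address. The host 192.168.1.100 and port 808 are placeholder values; 808 is the HTTP port CCProxy is commonly said to use by default, but substitute whatever address and port your own proxy is actually configured with.

```python
import requests

# Hypothetical LAN address of the machine running the proxy server,
# plus the HTTP proxy port configured in it (replace with your own values).
PROXY_URL = "http://192.168.1.100:808"

proxies = {
    "http": PROXY_URL,
    "https": PROXY_URL,
}

# Any request made with this mapping is routed through the proxy
# instead of going straight out to the Internet.
response = requests.get("http://example.com/", proxies=proxies, timeout=10)
print(response.status_code)
```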

        -

        If you want to download CCProxy 7.3 Build 20130510.zip from Mega, a popular cloud storage service, you can follow these steps:

        -

        CCProxy 7.3 Build 20130510.zip mega


        Download Zip >>> https://urlcod.com/2uK3We



        -
          -
        1. Go to the Mega website and create an account or log in with your existing one.
        2. Search for CCProxy 7.3 Build 20130510.zip on the Mega search bar or use this link: https://mega.nz/file/xxxxxx (Note: this is a hypothetical link and may not work).
        3. Click on the download button and choose a location to save the file on your computer.
        4. Wait for the download to finish and then extract the zip file using a program like WinRAR or 7-Zip.
        5. Run the setup file and follow the instructions to install CCProxy on your computer.
        6. Enjoy using CCProxy to share Internet access with your network devices.
        -

        For more information about CCProxy and its features, you can visit its official website: https://www.youngzsoft.net/ccproxy/ [^2^] or check out its archive page: https://archive.org/details/Ccproxysetup7.3 [^1^].

        - -

        CCProxy is a versatile and easy-to-use proxy server software that can meet various needs of users. Here are some of the benefits of using CCProxy:

        -
          -
        • It can help you save bandwidth and speed up your Internet access by caching web pages and files.
        • It can help you protect your privacy and security by filtering unwanted websites and content.
        • It can help you access blocked or restricted websites by using different proxy methods, such as IP address, port number, domain name, etc.
        • It can help you monitor and manage the Internet usage of your network devices by setting up different accounts, rules, and quotas.
        • It can help you customize your proxy settings by using various options, such as time schedule, online update, log service, etc.
        -

        If you have any questions or problems with CCProxy, you can contact its technical support team by email or phone. You can also find answers and solutions on its online forum and FAQ page.

        - -

        CCProxy is compatible with various operating systems, such as Windows XP, Windows 7, Windows 8, Windows 10, etc. It can also work with different browsers, such as Internet Explorer, Firefox, Chrome, Safari, etc. It supports both 32-bit and 64-bit systems.

        -

        CCProxy is a lightweight and stable proxy server software that does not require much hardware resources. It can run smoothly on any computer with a normal configuration. It does not affect the performance or speed of your computer or network.

        -

        CCProxy is a reliable and trustworthy proxy server software that has been used by millions of users around the world. It has been tested and verified by many antivirus and security programs. It does not contain any malware, spyware, or adware. It does not collect or leak any personal or sensitive information of users.

        -
        -
        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Keith Jarrett - The Melody At Night With You (1999) Reviews Analysis and Insights.md b/spaces/tialenAdioni/chat-gpt-api/logs/Keith Jarrett - The Melody At Night With You (1999) Reviews Analysis and Insights.md deleted file mode 100644 index c162d8d892020440e3c81b0a74086f396f473e1d..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Keith Jarrett - The Melody At Night With You (1999) Reviews Analysis and Insights.md +++ /dev/null @@ -1,64 +0,0 @@ - -

        Keith Jarrett - The Melody At Night, With You (1999): A Masterpiece of Solo Piano

        -

        If you are looking for a solo piano album that is intimate, expressive and captivating, you should listen to The Melody At Night, With You by Keith Jarrett. This album was recorded in 1999 at Jarrett's home studio in New Jersey, while he was recovering from a chronic fatigue syndrome that had forced him to stop performing for two years. The album consists of ten tracks, mostly standards and traditional songs, that showcase Jarrett's remarkable improvisational skills and musical sensitivity.

        -

        Keith Jarrett - The Melody At Night, With You (1999)


        Download https://urlcod.com/2uK7BU



        -

        One of the highlights of the album is the opening track, I Loves You Porgy, a classic song from George Gershwin's opera Porgy and Bess. Jarrett plays the melody with a delicate touch and a subtle use of dynamics, creating a mood of tenderness and nostalgia. He then explores the harmonic possibilities of the song with inventive variations and modulations, while maintaining the emotional core of the tune.

        -

        Another standout track is Blame It On My Youth, a ballad written by Oscar Levant and Edward Heyman. Jarrett plays this song with a deep sense of lyricism and grace, using his left hand to create rich chords and his right hand to sing the melody with his fingers. He also adds some embellishments and ornaments that enhance the beauty of the song without overdoing it.

        -

        The title track, The Melody At Night, With You, is an original composition by Jarrett that closes the album. It is a simple but elegant piece that reflects Jarrett's love for music and his gratitude for being able to play again. He plays the melody with a gentle touch and a warm tone, while adding some harmonic colors and rhythmic variations. The song has a soothing and comforting quality that makes it a perfect ending for this album.

        -

        The Melody At Night, With You is a masterpiece of solo piano that showcases Keith Jarrett's musical genius and personal expression. It is an album that will touch your heart and soul with its beauty and sincerity. If you are a fan of solo piano or jazz music in general, you should not miss this album.

        -

        - -

        The album also features some lesser-known songs that Jarrett interprets with his own style and flair. For example, Don't Ever Leave Me is a song by Jerome Kern and Oscar Hammerstein II that was originally written for the musical Sweet Adeline. Jarrett plays this song with a playful and joyful attitude, using syncopated rhythms and chromatic runs to create contrast and interest. He also changes the tempo and the mood several times, making the song sound fresh and lively.

        -

        Another hidden gem is Be My Love, a song by Nicholas Brodszky and Sammy Cahn that was popularized by Mario Lanza in the 1950s. Jarrett plays this song with a romantic and passionate feel, using rubato and expressive phrasing to convey the emotion of the lyrics. He also uses some dissonant chords and melodic twists to add some tension and drama to the song.

        -

        The album also includes two traditional songs that Jarrett plays with respect and reverence. Shenandoah is a folk song that originated from the American South, while My Wild Irish Rose is an Irish ballad that dates back to the 19th century. Jarrett plays these songs with a simple and elegant approach, using minimal embellishments and staying close to the original melodies. He also creates a spacious and atmospheric sound that evokes the landscapes and cultures of these songs.

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Celebrate the Festive Season with Subway Surfers APK v1 96.2.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Celebrate the Festive Season with Subway Surfers APK v1 96.2.md deleted file mode 100644 index fbbe21e24a45cdcfaea4251e3ae8ecca2e247fc6..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Celebrate the Festive Season with Subway Surfers APK v1 96.2.md +++ /dev/null @@ -1,72 +0,0 @@ - -

        Download Subway Surfers APK v1.96.2

        -

        If you are looking for a fun and addictive endless running game, you should definitely try Subway Surfers. This game will take you on a thrilling adventure across the world, where you have to dodge trains, obstacles, and the grumpy inspector and his dog. In this article, we will tell you everything you need to know about Subway Surfers, why you should download the latest version of the game, and how to do it easily and safely.

        -

        download subway surfers apk v1 96.2


        Download File ››››› https://bltlly.com/2uOjbC



        -

        What is Subway Surfers?

        -

        Subway Surfers is an endless running game developed by Kiloo and SYBO Games. Like most games in this genre, the player only has to worry about dodging obstacles; the running itself is automatic. The game features a group of young graffiti artists who run away from the inspector and his dog on various subway tracks around the world. Along the way, they collect coins, power-ups, keys, and other items that help them escape and score higher.

        -

        Features of Subway Surfers

        -

        Subway Surfers is not just a simple running game. It has many features that make it stand out from other similar games. Here are some of them:

        -

        Grind trains with your cool crew

        -

        You can choose from a variety of characters to play as, each with their own personality and style. You can also customize your character with different outfits, accessories, and hoverboards. You can even unlock special characters and hoverboards by completing missions or collecting tokens.

        -

        Colorful and vivid HD graphics

        -

        The game has amazing graphics that will make you feel like you are really traveling around the world. The game changes its location every month, so you can explore different cities and cultures. The game also has seasonal events that add more fun and excitement to the gameplay.

        -

        Hoverboard surfing

        -

        One of the coolest features of Subway Surfers is the hoverboard surfing. You can use your hoverboard to glide over obstacles, perform stunts, and boost your speed. You can also activate special power-ups on your hoverboard, such as magnet, jetpack, or score multiplier.

        -

        Paint powered jetpack

        -

        Another awesome feature of Subway Surfers is the paint powered jetpack. This power-up allows you to fly over the trains and collect coins in the air. You can also spray paint on the trains as you fly by them.

        -

        Lightning fast swipe acrobatics

        -

        The game has smooth and responsive controls that let you swipe left, right, up, or down to avoid obstacles and collect items. You can also perform tricks such as rolling or jumping over trains, sliding under barriers, or swinging on ropes.

        -


        -

        Challenge and help your friends

        -

        You can connect your game to Facebook and see how your friends are doing on the leaderboard. You can also send or receive gifts from your friends, such as keys, hoverboards, or boosters. You can also join a crew or create your own one and compete with other crews in weekly hunts.

        -

        Why download Subway Surfers APK v1.96.2?

        -

        If you are already a fan of Subway Surfers, you might be wondering why you should download the latest version of the game. Well, there are many reasons why you should do that, but here are some of the most important ones:

        -

        Celebrate the winter holiday in England

        -

        The latest update of Subway Surfers takes you to the beautiful and festive city of London. You can enjoy the snowy scenery, the iconic landmarks, and the cheerful atmosphere of the holiday season. You can also meet the new character, Jamie, a curious photographer who loves to capture the magic of the city.

        -

        Add some style to your crew with Jamie's classy Top Hat Outfit

        -

        If you want to dress up your character for the occasion, you can unlock Jamie's classy Top Hat Outfit by collecting 90 tokens. This outfit will make you look like a true gentleman or lady, and it will also give you a special power-up: the Top Hat. This power-up will make you invincible for a short time and let you collect coins faster.

        -

        Unlock all holiday themed characters and boards

        -

        Besides Jamie, you can also unlock other holiday themed characters and boards by completing missions or spending coins or keys. For example, you can get Elf Tricky, a cute and mischievous elf who loves to play pranks on the inspector. You can also get Rudy, a loyal and friendly reindeer who will help you escape. And of course, you can't miss the Snowflake board, a cool and icy board that will make you glide like a snowflake.

        -

        Search the tracks for delicious cookies and win awesome weekly prizes

        -

        As part of the winter holiday event, you can also search the tracks for delicious cookies that are hidden in different places. You can collect these cookies and exchange them for awesome weekly prizes, such as keys, coins, hoverboards, or boosters. You can also compete with other players in the cookie leaderboard and see who can collect the most cookies.

        -

        Bug fixes and optimizations

        -

        Last but not least, the latest version of Subway Surfers also comes with bug fixes and optimizations that will improve your gaming experience. The game will run smoother and faster on your device, and you will encounter fewer glitches and errors. You will also enjoy better graphics and sound quality.

        -

        How to download and install Subway Surfers APK v1.96.2?

        -

        Now that you know why you should download Subway Surfers APK v1.96.2, you might be wondering how to do it. Don't worry, it's very easy and safe. Just follow these simple steps:

        -

        Download the APK file from a trusted source

        -

        The first thing you need to do is to download the APK file from a trusted source. You can use this link to download it directly from our website. The file size is about 94 MB, so make sure you have enough space on your device.

        -

        Enable unknown sources on your device

        -

        The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources. Tap on it to enable it.

        -

        Locate and install the APK file

        -

        The final thing you need to do is to locate and install the APK file. You can use your file manager app to find it in your downloads folder. Tap on it to open it, then tap on install. Wait for a few seconds until the installation is complete.

        -

        Enjoy the game

        -

        Congratulations! You have successfully downloaded and installed Subway Surfers APK v1.96.2 on your device. Now you can enjoy the game and have fun with your friends.

        -

        Conclusion

        -

        Subway Surfers is one of the best endless running games out there. It has amazing features, graphics, and gameplay that will keep you entertained for hours. If you want to experience the latest version of the game, you should download Subway Surfers APK v1.96.2 from our website. It's easy, safe, and free. So what are you waiting for? Download it now and join the subway surfing adventure!

        FAQs

        Q: Is Subway Surfers APK v1.96.2 safe to download?
        A: Yes, it is safe to download Subway Surfers APK v1.96.2 from our website. We scan all our files with antivirus software before uploading them.

        Q: Is Subway Surfers APK v1.96.2 compatible with my device?
        A: Subway Surfers APK v1.96.2 is compatible with most Android devices that run Android 4.4 or higher.

        Q: How can I update Subway Surfers APK v1.96.2 to the latest version?
        A: You can update Subway Surfers APK v1.96.2 to the latest version by downloading and installing the new APK file from our website. You don't need to uninstall the previous version, just overwrite it with the new one.

        Q: How can I get more coins and keys in Subway Surfers APK v1.96.2?
        A: You can get more coins and keys in Subway Surfers APK v1.96.2 by playing the game regularly, completing missions, collecting power-ups, participating in events, and watching ads. You can also buy them with real money if you want to.

        Q: How can I contact the developers of Subway Surfers APK v1.96.2?
        A: You can contact the developers of Subway Surfers APK v1.96.2 by visiting their official website, Facebook page, or Twitter account. You can also send them an email at support@kiloo.com.

        Q: How can I share my feedback or suggestions for Subway Surfers APK v1.96.2?
        A: You can share your feedback or suggestions for Subway Surfers APK v1.96.2 by leaving a comment or rating on our website, Google Play Store, or App Store. You can also join the Subway Surfers community on Reddit, Discord, or YouTube and share your thoughts with other players.
        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Challenge Yourself with Wordle TR Unlimited Words Unlimited Fun.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Challenge Yourself with Wordle TR Unlimited Words Unlimited Fun.md deleted file mode 100644 index 7587d40ba2b71616e8fff1b4a7f8f87811f15f75..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Challenge Yourself with Wordle TR Unlimited Words Unlimited Fun.md +++ /dev/null @@ -1,194 +0,0 @@ - -

        Wordle TR Unlimited: How to Play Infinite Word Puzzles

        -

        Wordle is a popular word game that challenges you to guess a hidden word in six tries. It is simple, addictive, and fun to play. But what if you want to play more than one word puzzle a day? What if you want to play with different word lengths, languages, and modes? That's where Wordle TR Unlimited comes in. In this article, we will explain what Wordle TR Unlimited is, how it works, and how you can play infinite word puzzles with it.

        -

        wordle tr unlimited


        DOWNLOAD ✸✸✸ https://bltlly.com/2uOjAH



        -

        What is Wordle?

        -

        Wordle is a word game created by Josh Wardle, a software engineer from New York. It was launched in October 2021 and quickly became a viral sensation. The game is available on The New York Times website and as a mobile app.

        -

        The rules of Wordle are simple: You have to guess a five-letter word in six tries. You start by entering any five-letter word on the first row. The game will tell you if any of the letters in your word match the hidden word. If the letter is in the right position, it will turn green. If the letter is in the wrong position, it will turn yellow. If the letter is not in the word at all, it will turn gray. You can use these clues to narrow down your guesses and find the hidden word.
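
        To make that colouring rule concrete, here is a small illustrative sketch in Python, not Wordle's actual code, that scores a guess against a hidden word exactly as described above (the official game additionally limits how many times a repeated letter can turn yellow, a refinement omitted here):

```python
def score_guess(guess: str, hidden: str) -> list[str]:
    """Colour each guessed letter using the simple rule described above."""
    colours = []
    for position, letter in enumerate(guess):
        if hidden[position] == letter:
            colours.append("green")    # right letter, right position
        elif letter in hidden:
            colours.append("yellow")   # letter is in the word, wrong position
        else:
            colours.append("gray")     # letter is not in the word at all
    return colours

# Guessing "pasta" when the hidden word is "pizza":
print(score_guess("pasta", "pizza"))
# -> ['green', 'yellow', 'gray', 'gray', 'green']
```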

        -

        The benefits of Wordle are many: It is a fun and relaxing way to pass the time, it improves your vocabulary and spelling skills, it stimulates your brain and memory, and it gives you a sense of accomplishment when you solve the puzzle.

        -

        What is Wordle TR Unlimited?

        -

        Wordle TR Unlimited is a website that allows you to play unlimited word puzzles like Wordle. It is not affiliated with or endorsed by Josh Wardle or The New York Times. It is a fan-made project that aims to provide more options and features for word game lovers.

        -

        The features of Wordle TR Unlimited include:

        -


        -
          -
        • You can play with words from 4 to 11 letters long.
        • You can play with words in different languages, such as English, Spanish, French, German, Portuguese, Italian, Dutch, Russian, Polish, Ukrainian, Swedish, Irish, Czech, Greek, Turkish, Indonesian, and Filipino.
        • You can create your own puzzles with any word and challenge your friends.
        • You can use a smart assistant to help you find the hidden word if you get stuck.
        • You can switch to infinite mode and play as many puzzles as you want.
        • You can explore other games like Wordle, such as CoWordle, Multiplayer Wordle, Spelling Bee, Phrazle, Word Search, Crosswordle, Weaver, Dordle, Quordle, Octordle, Sedecordle, Globle, Worldle, Waffle, UnWordle, Numberle, Sudoku, 2048, Foodle, Flagle, Solitaire, Mahjong, and more.
        -

        The modes of Wordle TR Unlimited include:

        -
          -
        • Normal mode: This is the default mode that follows the same rules as Wordle. You have six tries to guess a five-letter word.
        • Infinite mode: This mode allows you to play unlimited puzzles with different word lengths and languages. You can choose how many tries you want to have for each puzzle.
        • Kids mode: This mode is designed for children up to 8th grade. It contains a dictionary of words for kids and has a word length starting from 3 letters. It helps children develop their vocabulary, spelling, and logic skills.
        • Hard mode: This mode is for advanced players who want a challenge. It contains a dictionary of difficult words and has a word length starting from 6 letters. It tests your knowledge, creativity, and perseverance.
        -

        The languages of Wordle TR Unlimited include:

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        | Language | Code | Word Length |
        | --- | --- | --- |
        | English | EN | 4-11 |
        | Spanish | ES | 4-11 |
        | French | FR | 4-11 |
        | German | DE | 4-11 |
        | Portuguese | PT | 4-11 |
        | Italian | IT | 4-11 |
        | Dutch | NL | 4-11 |
        | Russian | RU | 4-11 |
        | Polish | PL | 4-11 |
        | Ukrainian | UK | 4-11 |
        | Swedish | SV | 4-11 |
        | Irish | GA | 4-11 |
        | Czech | CS | 4-11 |
        | Greek | EL | 4-11 |
        | Turkish | TR | 4-11 |
        | Indonesian | ID | 4-11 |
        | Filipino | TL | 4-11 |
        -

        How to play Wordle TR Unlimited?

        -

        Playing Wordle TR Unlimited is easy and fun. Here are the steps to follow:

        -

        Enter the first word

        -

        Go to the Wordle TR Unlimited website and choose the language and mode you want to play. Then enter any word of the correct length on the first row. For example, if you are playing in English with a five-letter word, you can enter "pasta" or "candy". The game will check your word and give you feedback.

        -

        Find out what letters are in the hidden word

        -

        The game will tell you if any of the letters in your word match the hidden word. If the letter is in the right position, it will turn green. If the letter is in the word but in the wrong position, it will turn yellow. If the letter is not in the word at all, it will turn gray. For example, if the hidden word is "pizza" and you entered "pasta", you will see that the letter "p" and the final "a" are green, the first "a" is yellow, and the letters "s" and "t" are gray.

        -

        Use the clues to guess the hidden word

        -

        You can use these clues to narrow down your guesses and find the hidden word. You can also use a smart assistant to help you if you get stuck. The smart assistant will give you hints, such as how many vowels or consonants are in the word, or what letter is more likely to be in a certain position. You can also ask the smart assistant to reveal a letter for you, but this will cost you one try. You have six tries to guess the hidden word, unless you choose a different number in infinite mode.
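
        One way to picture what such an assistant could do behind the scenes (this is an illustrative sketch under the same simple colouring rule, not the site's actual implementation) is to keep a word list and discard every candidate that contradicts the feedback received so far:

```python
def is_consistent(candidate: str, guess: str, colours: list[str]) -> bool:
    """Could `candidate` still be the hidden word, given one scored guess?"""
    for i, (letter, colour) in enumerate(zip(guess, colours)):
        if colour == "green" and candidate[i] != letter:
            return False
        if colour == "yellow" and (letter not in candidate or candidate[i] == letter):
            return False
        if colour == "gray" and letter in candidate:
            return False
    return True

# A tiny, made-up candidate list; a real helper would load a full dictionary.
candidates = ["pizza", "pasta", "panda", "quilt", "pinky"]
feedback = ["green", "yellow", "gray", "gray", "green"]   # from guessing "pasta"
print([word for word in candidates if is_consistent(word, "pasta", feedback)])
# -> ['pizza']
```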

        -

        Switch to infinite mode for unlimited puzzles

        -

        If you want to play more than one puzzle a day, you can switch to infinite mode and play as many puzzles as you want. You can also choose different word lengths and languages for each puzzle. You can also create your own puzzles with any word and challenge your friends.

        -

        Conclusion

        -

        Wordle TR Unlimited is a website that allows you to play unlimited word puzzles like Wordle. It is a fan-made project that offers more options and features for word game lovers. You can play with words from 4 to 11 letters long, in different languages and modes, and with a smart assistant to help you. You can also create your own puzzles and explore other games like Wordle. Wordle TR Unlimited is a fun and engaging way to improve your vocabulary, spelling, logic, and creativity skills.

        -

        FAQs

        -
          -
        • Q: Is Wordle TR Unlimited free?
        • A: Yes, Wordle TR Unlimited is free to use and does not require any registration or download.
        • Q: Is Wordle TR Unlimited safe?
        • A: Yes, Wordle TR Unlimited is safe and does not collect any personal information or data from its users.
        • Q: Is Wordle TR Unlimited affiliated with or endorsed by Josh Wardle or The New York Times?
        • A: No, Wordle TR Unlimited is not affiliated with or endorsed by Josh Wardle or The New York Times. It is a fan-made project that respects the original creator and source of Wordle.
        • Q: How can I contact Wordle TR Unlimited?
        • A: You can contact Wordle TR Unlimited by sending an email to wordletrunlimited@gmail.com.
        • Q: How can I support Wordle TR Unlimited?
        • A: You can support Wordle TR Unlimited by sharing it with your friends, giving feedback, making suggestions, or donating via PayPal.

        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/Videoredo-Tvsuite-V4-Serial-Number.md deleted file mode 100644 index 5d7cd84a030435073f5d6bf0576e6ec05912811f..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/Videoredo-Tvsuite-V4-Serial-Number.md +++ /dev/null @@ -1,82 +0,0 @@

        ## Videoredo Tvsuite V4 Serial Number

        **Download ✅ [https://urluso.com/2tyQsT](https://urluso.com/2tyQsT)**

        # How to Edit and Convert Videos with Videoredo Tvsuite V4

        Videoredo Tvsuite V4 is a powerful video editing and conversion software that supports both MPEG2 and H.264 formats. It can handle various input and output formats, such as transport streams, MP4, MOV, WTV, DVR-MS and more. It can also perform frame-accurate editing, ad removal, audio synchronization, subtitle insertion and DVD authoring.

        To use Videoredo Tvsuite V4, you need a valid serial number that you can purchase from the official website or get from an upgrade utility if you are a registered user of a previous version. You can also request a trial key that will remove the 15-minute limitation for 15 days.

        Once you have installed and registered Videoredo Tvsuite V4, you can start editing and converting your videos. Here are some basic steps to follow:

        1. Open a video file by clicking on the **Open Video** button or dragging and dropping it into the program window.
        2. Use the navigation buttons and the timeline to find the parts of the video that you want to edit. You can also use the **Scene Marker** button to mark the start and end points of each scene.
        3. To cut out unwanted parts of the video, select them with the mouse or the keyboard and press the **Cut Selection** button. You can also use the **Ad-Detective** feature to automatically scan and remove commercials from your video.
        4. To add transitions, effects, subtitles or audio tracks to your video, click on the **Tools** menu and select the option that you want. You can also use the **Joiner** feature to combine multiple video files into one.
        5. To convert your video to a different format, click on the **Save As** button and choose the output format that you want. You can also customize the video and audio settings by clicking on the **Options** button.
        6. To create a DVD from your video, click on the **Create DVD** button and follow the wizard steps. You can also use the **Burn DVD** feature to burn your DVD directly to a disc.

        Videoredo Tvsuite V4 is a versatile and easy-to-use video editing and conversion software that can help you create professional-looking videos in no time. You can download it from [here](https://videoredo.net/msgBoard/index.php?threads/getting-started-with-videoredo-tvsuite-v4-with-h-264.15759/) and try it for free for 15 days.

        Videoredo Tvsuite V4 has many features that make it stand out from other video editing and conversion software. Some of these features are:

        - **Frame-accurate editing**: Videoredo Tvsuite V4 can edit your videos without re-encoding them, which means you can preserve the original quality and save time. You can also edit H.264 videos without converting them to MPEG2 first, which is a unique feature among video editors.
        - **Ad-Detective**: Videoredo Tvsuite V4 can automatically scan and remove commercials from your videos with high accuracy. You can also fine-tune the detection settings and review the results before saving your video.
        - **Joiner**: Videoredo Tvsuite V4 can combine multiple video files into one without re-encoding them. You can also use the joiner to create chapters and menus for your DVD.
        - **Create DVD**: Videoredo Tvsuite V4 can create DVDs from your videos with a few clicks. You can choose from various templates and customize the menu layout, background, buttons and text. You can also add subtitles and audio tracks to your DVD.
        - **Burn DVD**: Videoredo Tvsuite V4 can burn your DVDs directly to a disc without using any other software. You can also use the burn DVD feature to create ISO images or folders that you can store on your hard drive or share online.

        Videoredo Tvsuite V4 is a comprehensive and user-friendly video editing and conversion software that can handle any video format and task. Whether you want to edit, convert, join, remove ads, create or burn DVDs, Videoredo Tvsuite V4 can do it all for you. You can download it from [here](https://videoredo.net/msgBoard/index.php?threads/getting-started-with-videoredo-tvsuite-v4-with-h-264.15759/) and try it for free for 15 days.

        diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Download The Agnisakshi Movie Torrent TOP.md deleted file mode 100644 index 06209c54ea41e479a6ed1c3017989da7ab65e1b4..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Download The Agnisakshi Movie Torrent TOP.md +++ /dev/null @@ -1,32 +0,0 @@

        Download The Agnisakshi Movie Torrent: A Thrilling Drama About Love and Obsession

        - -

        Agnisakshi is a 1996 Hindi movie that stars Jackie Shroff, Nana Patekar and Manisha Koirala in the lead roles. The movie is a remake of the Hollywood thriller Sleeping with the Enemy, and revolves around a newly married couple who are haunted by the past of the wife.

        -

        Download The Agnisakshi Movie Torrent


        Download File ☆☆☆ https://urlcod.com/2uHxKF



        - -

        Suraj (Jackie Shroff) is a wealthy businessman who falls in love with Shivangi (Manisha Koirala), a beautiful woman who has escaped from her abusive husband Vishwanath (Nana Patekar). They get married and go on a honeymoon, where they encounter Vishwanath, who claims that Shivangi is his wife Madhu. Suraj refuses to believe him, but Vishwanath starts stalking and threatening them. He shows Suraj a video of his wedding with Madhu, who looks exactly like Shivangi. Suraj is shocked and confused, and wonders if Shivangi has lied to him.

        - -

        Shivangi reveals that she is indeed Madhu, and that she had faked her death to escape from Vishwanath, who used to torture her physically and mentally. She changed her identity and started a new life, but Vishwanath tracked her down. She pleads with Suraj to trust her and protect her from Vishwanath, who wants to kill her. Suraj decides to stand by Shivangi, and confronts Vishwanath. A violent struggle ensues, in which Suraj accidentally pushes Vishwanath off a cliff.

        - -

        Suraj and Shivangi think they are finally free from Vishwanath, but they are wrong. Vishwanath survives the fall, and continues to pursue them. He kidnaps Shivangi and tries to prove that she is Madhu by looking for a mark on her body. Suraj rescues her in time, and they flee to their home. However, Vishwanath follows them there too, and attacks them with a gun. In the final showdown, Suraj manages to shoot Vishwanath dead, and saves Shivangi.

        -

        - -

        Agnisakshi is a gripping movie that keeps the viewers on the edge of their seats. The movie showcases the brilliant performances of Nana Patekar, who won the National Film Award for Best Supporting Actor for his role as the psychotic Vishwanath, and Manisha Koirala, who convincingly portrays the trauma and fear of Shivangi/Madhu. Jackie Shroff also does a commendable job as the supportive and loving Suraj. The movie also has a melodious soundtrack by Nadeem-Shravan, which adds to the emotional quotient of the movie.

        - -

        If you are looking for a thrilling drama about love and obsession, you should download the Agnisakshi movie torrent from a reliable source. You will not regret watching this movie that will keep you hooked till the end.

        Here is the continuation of the article: - -

        Agnisakshi was a huge hit at the box office, and received positive reviews from critics and audiences alike. The movie was praised for its gripping plot, suspenseful direction, and powerful performances. The movie also won several awards, including the National Film Award for Best Supporting Actor for Nana Patekar, and the Filmfare Award for Best Villain for Nana Patekar.

        - -

        The movie is considered to be one of the best thrillers of Bollywood, and one of the finest roles of Nana Patekar. His portrayal of Vishwanath, the obsessive and violent husband, is chilling and unforgettable. He brings out the menace and madness of his character with his intense expressions and dialogue delivery. He also shows his versatility by switching from a loving husband to a ruthless killer in a matter of seconds.

        - -

        Manisha Koirala also delivers a remarkable performance as Shivangi/Madhu, the woman who suffers at the hands of Vishwanath. She portrays the fear, pain, and courage of her character with conviction and sensitivity. She also shares a great chemistry with Jackie Shroff, who plays Suraj, the supportive and loyal husband. Jackie Shroff gives a decent performance as Suraj, who stands by Shivangi despite the odds.

        - -

        The movie also has some memorable scenes that add to the thrill and drama of the story. For instance, the scene where Vishwanath confronts Shivangi at her birthday party and reveals her past to Suraj; the scene where Vishwanath kidnaps Shivangi and tries to mark her with a knife; the scene where Suraj shoots Vishwanath in front of a crowd; and the scene where Vishwanath sings a song to Shivangi on the phone.

        - -

        The movie also has some flaws that can be overlooked in the light of its overall impact. For instance, the movie has some songs that are unnecessary and break the flow of the story; the movie has some logical loopholes that are hard to believe; and the movie has some cliched dialogues that are typical of Bollywood movies.

        - -

        However, these flaws do not take away from the fact that Agnisakshi is a thrilling and entertaining movie that will keep you hooked till the end. The movie is a must-watch for fans of thrillers, dramas, and Nana Patekar. You can download the Agnisakshi movie torrent from a reliable source and enjoy this movie at your convenience.

        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Jammer-pro-5-crackl.md b/spaces/tioseFevbu/cartoon-converter/scripts/Jammer-pro-5-crackl.md deleted file mode 100644 index b5bda135c591ff963cb694dbac7e831f5de7c64b..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Jammer-pro-5-crackl.md +++ /dev/null @@ -1,24 +0,0 @@ - -

        How to Crack Jammer Pro 5 and Enjoy Its Features

        -

        Jammer Pro 5 is a powerful audio workstation that lets you create musical accompaniment and full arrangements of popular songs and original music in a wide variety of styles. You can connect your MIDI devices or import files on 256 different tracks, each with a set of thorough editing and processing tools. You can also use the built-in studio musicians to automatically generate drums, bass and rhythm tracks.

        -

        Jammer-pro-5-crackl


        Download Zip ····· https://urlcod.com/2uHwnK



        -

        However, Jammer Pro 5 is not free software. You need to purchase a license to use it without limitations. If you don't want to pay for it, you might be tempted to look for a crack that bypasses the activation process and unlocks all the features. But is it worth it?

        -

        The Risks of Using a Crack

        -

        Using a crack for Jammer Pro 5 or any other software is illegal and unethical. You are violating the copyright laws and the terms of service of the software developer. You are also depriving them of their rightful income and discouraging them from creating more quality products.

        -

        Moreover, using a crack can expose your computer to various security risks. You never know what kind of malware or viruses are hidden in the crack files. You might end up infecting your system with spyware, ransomware, trojans or other malicious programs that can steal your personal data, damage your files or compromise your online accounts.

        -

        -

        Using a crack can also affect the performance and stability of your software. You might experience crashes, errors, glitches or compatibility issues that can ruin your musical projects. You might also miss out on the latest updates and bug fixes that can improve your user experience and enhance your creativity.

        -

        The Benefits of Using a Legitimate Version

        -

        Instead of using a crack for Jammer Pro 5, you should consider using a legitimate version that you can download from the official website[^1^] [^2^]. You can try the free demo version for yourself to check out the features and styles before upgrading to the full version for $59.95[^4^]. By doing so, you will enjoy the following benefits:

        -
          -
        • You will support the software developer and encourage them to continue developing and improving Jammer Pro 5.
        • You will get access to all the features and styles without any restrictions or limitations.
        • You will get regular updates and bug fixes that can enhance your software performance and stability.
        • You will get technical support and customer service from the software developer in case you encounter any problems or have any questions.
        • You will avoid any legal or ethical issues that might arise from using a crack.
        • You will protect your computer from any malware or viruses that might come with a crack.
        -

        Conclusion

        -

        Jammer Pro 5 is a great tool for creating musical accompaniment and full arrangements of songs and music in a wide range of styles. However, using a crack for Jammer Pro 5 is not a good idea: it is illegal, unethical, risky and unreliable. Use a legitimate version instead and enjoy its features without any worries.

        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Lg E610 Hard Reset Vygis REPACK Cracked.md b/spaces/tioseFevbu/cartoon-converter/scripts/Lg E610 Hard Reset Vygis REPACK Cracked.md deleted file mode 100644 index 10f9c41c06f76405fd51f9fd654e9c9fcf6b623e..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Lg E610 Hard Reset Vygis REPACK Cracked.md +++ /dev/null @@ -1,58 +0,0 @@ - -

        How to Hard Reset LG E610 Optimus L5 with Vygis Cracked

        -

        If you have forgotten your password, pattern or PIN on your LG E610 Optimus L5, you may need to perform a hard reset to restore your phone to its factory settings. A hard reset will erase all your personal data and settings, so make sure you back up your important files before proceeding.

        -

        Lg E610 Hard Reset Vygis Cracked


        Download File ✶✶✶ https://urlcod.com/2uHw8i



        -

        There are two methods to hard reset your LG E610 Optimus L5: using the hardware buttons or using the Vygis Cracked software. The Vygis Cracked software is a tool that can unlock and flash various LG phones, including the LG E610 Optimus L5. You will need a USB cable and a computer to use this method.

        -

        Method 1: Using the Hardware Buttons

        -

        This method is suitable if you can access the recovery mode on your phone. Follow these steps to hard reset your LG E610 Optimus L5 using the hardware buttons:

        -
          -
        1. Turn off your phone completely. If you cannot unlock the screen, press and hold the power button for about 20 seconds and the phone will turn off[^4^].
        2. Press and hold Volume Down + Home + Power button simultaneously. When the LED keys on the bottom light up, release the power button but keep holding the volume down and home buttons[^3^].
        3. You will see the LG logo and then a screen with an android robot and a progress bar. Wait for a few seconds until the progress bar reaches 100%.
        4. You will see a menu with several options. Use the volume buttons to navigate and the power button to select. Choose "wipe data/factory reset" and confirm with "Yes".
        5. Wait for the process to complete. Then choose "reboot system now" and wait for your phone to restart.
        -

        Your LG E610 Optimus L5 should be hard reset and ready to use.

        -

        Method 2: Using Vygis Cracked Software

        -

        This method is suitable if you cannot access the recovery mode on your phone or if you want to unlock your phone from any network. Follow these steps to hard reset your LG E610 Optimus L5 using Vygis Cracked software:

        -
          -
        1. Download and install Vygis Cracked software from this link: (insert link here).
        2. -
        3. Run Vygis Cracked software on your computer and select "LG" from the menu.
        4. -
        5. Select "E610" from the model list and click "Connect".
        6. -
        7. Connect your phone to your computer using a USB cable. Make sure your phone is turned off.
        8. -
        9. Vygis Cracked software will detect your phone and display its information. Click "Unlock" to unlock your phone from any network.
        10. -
        11. Click "Flash" to flash your phone with new firmware. This will also erase all your data and settings.
        12. -
        13. Wait for the process to complete. Then disconnect your phone from your computer and turn it on.
        14. -
        -

        Your LG E610 Optimus L5 should be hard reset and unlocked from any network.

        -

        - -

        Benefits of Hard Resetting Your LG E610 Optimus L5

        -

        Hard resetting your LG E610 Optimus L5 can have several benefits, such as:

        -
          -
        • Fixing software issues and glitches that may cause your phone to freeze, crash or run slowly.
        • -
        • Removing viruses, malware and other harmful programs that may infect your phone and compromise your security and privacy.
        • -
        • Clearing up storage space and improving your phone's performance and battery life.
        • -
        • Resetting your phone to its original state and removing any customizations or modifications that may void your warranty or cause problems.
        • -
        • Unlocking your phone from any network and allowing you to use any SIM card of your choice.
        • -
        -

        However, hard resetting your LG E610 Optimus L5 also has some drawbacks, such as:

        -
          -
        • Losing all your personal data and settings, including your contacts, messages, photos, videos, music, apps and more. You will need to back up your important files before hard resetting your phone.
        • -
        • Requiring a Google account to activate your phone after hard resetting it. You will need to remember your Google account username and password or create a new one.
        • -
        • Possibly bricking your phone if you use incompatible firmware or interrupt the flashing process. You will need to follow the instructions carefully and use a reliable software tool like Vygis Cracked.
        • -
        -

        Therefore, you should weigh the pros and cons of hard resetting your LG E610 Optimus L5 before deciding to do it.

        -

        Tips for Using Your LG E610 Optimus L5 After Hard Resetting It

        -

        After hard resetting your LG E610 Optimus L5, you may want to follow these tips to make the most of your phone:

        -
          -
        • Restore your backed-up data and settings to your phone using a cloud service or a memory card.
        • -
        • Update your phone's software to the latest version available to get the best features and security patches.
        • -
        • Install only the apps that you need and trust from the official Google Play Store or other reputable sources.
        • -
        • Avoid clicking on suspicious links or downloading unknown files that may contain viruses or malware.
        • -
        • Use a screen lock and a PIN or password to protect your phone from unauthorized access.
        • -
        • Enable Find My Device on your phone to locate it if you lose it or erase it remotely if it gets stolen.
        • -
        -

        By following these tips, you can enjoy using your LG E610 Optimus L5 after hard resetting it.

        7b8c122e87
        -
        -
        \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/msgpack/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/msgpack/__init__.py deleted file mode 100644 index 5071021898725e4f787daf6c3624ae7f25a018f7..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/msgpack/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 -from .exceptions import * -from .ext import ExtType, Timestamp - -import os -import sys - - -version = (1, 0, 4) -__version__ = "1.0.4" - - -if os.environ.get("MSGPACK_PUREPYTHON") or sys.version_info[0] == 2: - from .fallback import Packer, unpackb, Unpacker -else: - try: - from ._cmsgpack import Packer, unpackb, Unpacker - except ImportError: - from .fallback import Packer, unpackb, Unpacker - - -def pack(o, stream, **kwargs): - """ - Pack object `o` and write it to `stream` - - See :class:`Packer` for options. - """ - packer = Packer(**kwargs) - stream.write(packer.pack(o)) - - -def packb(o, **kwargs): - """ - Pack object `o` and return packed bytes - - See :class:`Packer` for options. - """ - return Packer(**kwargs).pack(o) - - -def unpack(stream, **kwargs): - """ - Unpack an object from `stream`. - - Raises `ExtraData` when `stream` contains extra bytes. - See :class:`Unpacker` for options. - """ - data = stream.read() - return unpackb(data, **kwargs) - - -# alias for compatibility to simplejson/marshal/pickle. -load = unpack -loads = unpackb - -dump = pack -dumps = packb diff --git a/spaces/tomofi/MMOCR/configs/textrecog/seg/seg_r31_1by16_fpnocr_academic.py b/spaces/tomofi/MMOCR/configs/textrecog/seg/seg_r31_1by16_fpnocr_academic.py deleted file mode 100644 index 4e37856c06fb43cb0b67a6a1760bd7ef9eeddb66..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/configs/textrecog/seg/seg_r31_1by16_fpnocr_academic.py +++ /dev/null @@ -1,40 +0,0 @@ -_base_ = [ - '../../_base_/default_runtime.py', - '../../_base_/recog_pipelines/seg_pipeline.py', - '../../_base_/recog_models/seg.py', - '../../_base_/recog_datasets/ST_charbox_train.py', - '../../_base_/recog_datasets/academic_test.py' -] - -train_list = {{_base_.train_list}} -test_list = {{_base_.test_list}} - -train_pipeline = {{_base_.train_pipeline}} -test_pipeline = {{_base_.test_pipeline}} - -# optimizer -optimizer = dict(type='Adam', lr=1e-4) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict(policy='step', step=[3, 4]) -total_epochs = 5 - -find_unused_parameters = True - -data = dict( - samples_per_gpu=16, - workers_per_gpu=2, - train=dict( - type='UniformConcatDataset', - datasets=train_list, - pipeline=train_pipeline), - val=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline), - test=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline)) - -evaluation = dict(interval=1, metric='acc') diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py deleted file mode 100644 index 497d03f6f702ecb47cccbe0089089b5a002ebcca..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = 
'./fcos_r50_caffe_fpn_gn-head_1x_coco.py' -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/README.md deleted file mode 100644 index 472f8ad31dbdd65b3192e84736bedc428e1e613e..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/hrnet/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# High-resolution networks (HRNets) for object detection - -## Introduction - - - -```latex -@inproceedings{SunXLW19, - title={Deep High-Resolution Representation Learning for Human Pose Estimation}, - author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang}, - booktitle={CVPR}, - year={2019} -} - -@article{SunZJCXLMWLW19, - title={High-Resolution Representations for Labeling Pixels and Regions}, - author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao - and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, - journal = {CoRR}, - volume = {abs/1904.04514}, - year={2019} -} -``` - -## Results and Models - -### Faster R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:| :--------:| -| HRNetV2p-W18 | pytorch | 1x | 6.6 | 13.4 | 36.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130_211246.log.json) | -| HRNetV2p-W18 | pytorch | 2x | 6.6 | | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731.log.json) | -| HRNetV2p-W32 | pytorch | 1x | 9.0 | 12.4 | 40.2 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130_204442.log.json) | -| HRNetV2p-W32 | pytorch | 2x | 9.0 | | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927.log.json) | -| HRNetV2p-W40 | pytorch | 1x | 10.4 | 10.5 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210_125315.log.json) | -| HRNetV2p-W40 | pytorch | 2x | 10.4 | | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033.log.json) | - -### Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:|:------:|:--------:| -| HRNetV2p-W18 | pytorch | 1x | 7.0 | 11.7 | 37.7 | 34.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205_232523.log.json) | -| HRNetV2p-W18 | pytorch | 2x | 7.0 | - | 39.8 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212_134222.log.json) | -| HRNetV2p-W32 | pytorch | 1x | 9.4 | 11.3 | 41.2 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207_055017.log.json) | -| HRNetV2p-W32 | pytorch | 2x | 9.4 | - | 42.5 | 37.8 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213_150518.log.json) | -| HRNetV2p-W40 | pytorch | 1x | 10.9 | | 42.1 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646.log.json) | -| HRNetV2p-W40 | pytorch | 2x | 10.9 | | 42.8 | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732.log.json) | - -### Cascade R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------: | :--------: | -| HRNetV2p-W18 | pytorch | 20e | 7.0 | 11.0 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210_105632.log.json) | -| HRNetV2p-W32 | pytorch | 20e | 9.4 | 11.0 | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208_160511.log.json) | -| HRNetV2p-W40 | pytorch | 20e | 10.8 | | 43.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112.log.json) | - -### Cascade Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:|:------:|:--------:| -| HRNetV2p-W18 | pytorch | 20e | 8.5 | 8.5 |41.6 |36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py) | 
[model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210_093149.log.json) | -| HRNetV2p-W32 | pytorch | 20e | | 8.3 |44.3 |38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043.log.json) | -| HRNetV2p-W40 | pytorch | 20e | 12.5 | |45.1 |39.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922.log.json) | - -### Hybrid Task Cascade (HTC) - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :-------------:|:------:| :------:|:------:|:--------:| -| HRNetV2p-W18 | pytorch | 20e | 10.8 | 4.7 | 42.8 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210_182735.log.json) | -| HRNetV2p-W32 | pytorch | 20e | 13.1 | 4.9 | 45.4 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207_193153.log.json) | -| HRNetV2p-W40 | pytorch | 20e | 14.6 | | 46.4 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411.log.json) | - -### FCOS - -| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -|:---------:|:-------:|:-------:|:--------:|:-------:|:------:|:------:|:------:|:------:|:--------:| -|HRNetV2p-W18| pytorch | Y | N | 1x | 13.0 | 12.9 | 35.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth) | 
[log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710.log.json) | -|HRNetV2p-W18| pytorch | Y | N | 2x | 13.0 | - | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110.log.json) | -|HRNetV2p-W32| pytorch | Y | N | 1x | 17.5 | 12.9 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730.log.json) | -|HRNetV2p-W32| pytorch | Y | N | 2x | 17.5 | - | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133.log.json) | -|HRNetV2p-W18| pytorch | Y | Y | 2x | 13.0 | 12.9 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651.log.json) | -|HRNetV2p-W32| pytorch | Y | Y | 2x | 17.5 | 12.4 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846.log.json) | -|HRNetV2p-W48| pytorch | Y | Y | 2x | 20.3 | 10.8 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752.log.json) | - -**Note:** - -- The `28e` schedule in HTC indicates decreasing the lr at 24 
and 27 epochs, with a total of 28 epochs. -- HRNetV2 ImageNet pretrained models are in [HRNets for Image Classification](https://github.com/HRNet/HRNet-Image-Classification). diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py deleted file mode 100644 index d4fe9d0e3c8704bd780d493eff20a5505dbe9580..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py +++ /dev/null @@ -1,178 +0,0 @@ -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class ATSSAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `0` or a positive integer - indicating the ground truth index. - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - topk (float): number of bbox selected in each level - """ - - def __init__(self, - topk, - iou_calculator=dict(type='BboxOverlaps2D'), - ignore_iof_thr=-1): - self.topk = topk - self.iou_calculator = build_iou_calculator(iou_calculator) - self.ignore_iof_thr = ignore_iof_thr - - # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py - - def assign(self, - bboxes, - num_level_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - gt_labels=None): - """Assign gt to bboxes. - - The assignment is done in following steps - - 1. compute iou between all bbox (bbox of all pyramid levels) and gt - 2. compute center distance between all bbox and gt - 3. on each pyramid level, for each gt, select k bbox whose center - are closest to the gt center, so we total select k*l bbox as - candidates for each gt - 4. get corresponding iou for the these candidates, and compute the - mean and std, set mean + std as the iou threshold - 5. select these candidates whose iou are greater than or equal to - the threshold as positive - 6. limit the positive sample's center in gt - - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - num_level_bboxes (List): num of bboxes in each level - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. 
- """ - INF = 100000000 - bboxes = bboxes[:, :4] - num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) - - # compute iou between all bbox and gt - overlaps = self.iou_calculator(bboxes, gt_bboxes) - - # assign 0 by default - assigned_gt_inds = overlaps.new_full((num_bboxes, ), - 0, - dtype=torch.long) - - if num_gt == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes, )) - if num_gt == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - # compute center distance between all bbox and gt - gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 - gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 - gt_points = torch.stack((gt_cx, gt_cy), dim=1) - - bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 - bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 - bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) - - distances = (bboxes_points[:, None, :] - - gt_points[None, :, :]).pow(2).sum(-1).sqrt() - - if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None - and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): - ignore_overlaps = self.iou_calculator( - bboxes, gt_bboxes_ignore, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) - ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr - distances[ignore_idxs, :] = INF - assigned_gt_inds[ignore_idxs] = -1 - - # Selecting candidates based on the center distance - candidate_idxs = [] - start_idx = 0 - for level, bboxes_per_level in enumerate(num_level_bboxes): - # on each pyramid level, for each gt, - # select k bbox whose center are closest to the gt center - end_idx = start_idx + bboxes_per_level - distances_per_level = distances[start_idx:end_idx, :] - selectable_k = min(self.topk, bboxes_per_level) - _, topk_idxs_per_level = distances_per_level.topk( - selectable_k, dim=0, largest=False) - candidate_idxs.append(topk_idxs_per_level + start_idx) - start_idx = end_idx - candidate_idxs = torch.cat(candidate_idxs, dim=0) - - # get corresponding iou for the these candidates, and compute the - # mean and std, set mean + std as the iou threshold - candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] - overlaps_mean_per_gt = candidate_overlaps.mean(0) - overlaps_std_per_gt = candidate_overlaps.std(0) - overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt - - is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] - - # limit the positive sample's center in gt - for gt_idx in range(num_gt): - candidate_idxs[:, gt_idx] += gt_idx * num_bboxes - ep_bboxes_cx = bboxes_cx.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - ep_bboxes_cy = bboxes_cy.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - candidate_idxs = candidate_idxs.view(-1) - - # calculate the left, top, right, bottom distance between positive - # bbox center and gt side - l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] - t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] - r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 - is_pos = is_pos & is_in_gts - - # if an anchor box is assigned to 
multiple gts, - # the one with the highest IoU will be selected. - overlaps_inf = torch.full_like(overlaps, - -INF).t().contiguous().view(-1) - index = candidate_idxs.view(-1)[is_pos.view(-1)] - overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] - overlaps_inf = overlaps_inf.view(num_gt, -1).t() - - max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) - assigned_gt_inds[ - max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/spaces/trttung1610/musicgen/audiocraft/modules/rope.py b/spaces/trttung1610/musicgen/audiocraft/modules/rope.py deleted file mode 100644 index 503e6748df2bb72b3c864c20b37cba5498ffdd21..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/modules/rope.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -from torch import nn -import torch - - -class XPos(nn.Module): - """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1). - This applies an exponential decay to the RoPE rotation matrix. - - Args: - dim (int): Embedding dimension. - smoothing (float): Smoothing factor applied to the decay rates. - base_scale (int): Base decay rate, given in terms of scaling time. - device (torch.device, optional): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. - """ - def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512, - device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - self.base_scale = base_scale - - half_dim = dim // 2 - adim = torch.arange(half_dim, device=device, dtype=dtype) - decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing) - self.register_buffer("decay_rates", decay_rates) - self.decay: tp.Optional[torch.Tensor] = None - - def get_decay(self, start: int, end: int): - """Create complex decay tensor, cache values for fast computation.""" - if self.decay is None or end > self.decay.shape[0]: - assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype) - power = idx / self.base_scale - scale = self.decay_rates ** power.unsqueeze(-1) - self.decay = torch.polar(scale, torch.zeros_like(scale)) - return self.decay[start:end] # [T, C/2] - - -class RotaryEmbedding(nn.Module): - """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864). - - Args: - dim (int): Embedding dimension (twice the number of frequencies). - max_period (float): Maximum period of the rotation frequencies. - xpos (bool): Use xPos, applies an exponential decay to rotation matrix. - scale (float): Scale of positional embedding, set to 0 to deactivate. - device (torch.device, optional): Device on which to initialize the module. 
- dtype (torch.dtype): dtype to use to generate the embedding. - """ - def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False, - scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - self.scale = scale - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - - adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)] - frequencies = 1.0 / (max_period ** (adim / dim)) - self.register_buffer("frequencies", frequencies) - self.rotation: tp.Optional[torch.Tensor] = None - - self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None - - def get_rotation(self, start: int, end: int): - """Create complex rotation tensor, cache values for fast computation.""" - if self.rotation is None or end > self.rotation.shape[0]: - assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype) - angles = torch.outer(idx, self.frequencies) - self.rotation = torch.polar(torch.ones_like(angles), angles) - return self.rotation[start:end] - - def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False): - """Apply rope rotation to query or key tensor.""" - T = x.shape[1] - rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2) - - if self.xpos: - decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2) - else: - decay = 1.0 - - if invert_decay: - decay = decay ** -1 - - x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2)) - scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale) - x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2) - - return x_out.type_as(x) - - def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0): - """ Apply rope rotation to both query and key tensors. - Supports streaming mode, in which query and key are not expected to have the same shape. - In streaming mode, key will be of length [P + C] with P the cached past timesteps, but - query will be [C] (typically C == 1). - - Args: - query (torch.Tensor): Query to rotate. - key (torch.Tensor): Key to rotate. - start (int): Start index of the sequence for time offset. 
- """ - query_timesteps = query.shape[1] - key_timesteps = key.shape[1] - streaming_offset = key_timesteps - query_timesteps - - query_out = self.rotate(query, start + streaming_offset) - key_out = self.rotate(key, start, invert_decay=True) - - return query_out, key_out diff --git a/spaces/truong-xuan-linh/auto-comment-generation/utils/get_logging.py b/spaces/truong-xuan-linh/auto-comment-generation/utils/get_logging.py deleted file mode 100644 index af9c0449f24048c6f4ef4570999c7573a6f2d275..0000000000000000000000000000000000000000 --- a/spaces/truong-xuan-linh/auto-comment-generation/utils/get_logging.py +++ /dev/null @@ -1,18 +0,0 @@ -import pytz -import logging -import datetime - -def get_logging(log_dir): - """Get logging function - - Args: - log_dir (String): location directory (relative path) - - Returns: - logging: custom logging - """ - - logging.Formatter.converter = logging.Formatter.converter = lambda *args: datetime.datetime.now(tz=pytz.timezone('Asia/Ho_Chi_Minh')).timetuple() - logging.basicConfig(filename=log_dir, level=logging.INFO, format = '[%(asctime)s] - [%(levelname)s] - [%(funcName)s] - [%(lineno)d] - %(message)s', - filemode="w") - return logging \ No newline at end of file diff --git a/spaces/tsfeng/DeepDanbooru-string/app.py b/spaces/tsfeng/DeepDanbooru-string/app.py deleted file mode 100644 index 8de2980dd2e2eb50cb01176532d2ae3a262140d5..0000000000000000000000000000000000000000 --- a/spaces/tsfeng/DeepDanbooru-string/app.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import argparse -import functools -import os -import html -import pathlib -import tarfile - -import deepdanbooru as dd -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image -import tensorflow as tf -import piexif -import piexif.helper - -TITLE = 'DeepDanbooru String' - -TOKEN = os.environ['TOKEN'] -MODEL_REPO = 'CikeyQI/DeepDanbooru_string' -MODEL_FILENAME = 'model-resnet_custom_v3.h5' -LABEL_FILENAME = 'tags.txt' - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--score-slider-step', type=float, default=0.05) - parser.add_argument('--score-threshold', type=float, default=0.5) - parser.add_argument('--theme', type=str, default='dark-grass') - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - return parser.parse_args() - - -def load_sample_image_paths() -> list[pathlib.Path]: - image_dir = pathlib.Path('images') - if not image_dir.exists(): - dataset_repo = 'hysts/sample-images-TADNE' - path = huggingface_hub.hf_hub_download(dataset_repo, - 'images.tar.gz', - repo_type='dataset', - use_auth_token=TOKEN) - with tarfile.open(path) as f: - f.extractall() - return sorted(image_dir.glob('*')) - - -def load_model() -> tf.keras.Model: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - MODEL_FILENAME, - use_auth_token=TOKEN) - model = tf.keras.models.load_model(path) - return model - - -def load_labels() -> list[str]: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - LABEL_FILENAME, - use_auth_token=TOKEN) - with open(path) as f: - labels = [line.strip() for line in f.readlines()] - return labels - -def plaintext_to_html(text): - text = "

        " + "
        \n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "

        " - return text - -def predict(image: PIL.Image.Image, score_threshold: float, - model: tf.keras.Model, labels: list[str]) -> dict[str, float]: - rawimage = image - _, height, width, _ = model.input_shape - image = np.asarray(image) - image = tf.image.resize(image, - size=(height, width), - method=tf.image.ResizeMethod.AREA, - preserve_aspect_ratio=True) - image = image.numpy() - image = dd.image.transform_and_pad_image(image, width, height) - image = image / 255. - probs = model.predict(image[None, ...])[0] - probs = probs.astype(float) - res = dict() - for prob, label in zip(probs.tolist(), labels): - if prob < score_threshold: - continue - res[label] = prob - b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True)) - a = ', '.join(list(b.keys())).replace('_',' ').replace('(','\(').replace(')','\)') - c = ', '.join(list(b.keys())) - - items = rawimage.info - geninfo = '' - - if "exif" in rawimage.info: - exif = piexif.load(rawimage.info["exif"]) - exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') - try: - exif_comment = piexif.helper.UserComment.load(exif_comment) - except ValueError: - exif_comment = exif_comment.decode('utf8', errors="ignore") - - items['exif comment'] = exif_comment - geninfo = exif_comment - - for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', - 'loop', 'background', 'timestamp', 'duration']: - items.pop(field, None) - - geninfo = items.get('parameters', geninfo) - - info = f""" -

        PNG Info

        -""" - for key, text in items.items(): - info += f""" -
        -

        {plaintext_to_html(str(key))}

        -

        {plaintext_to_html(str(text))}

        -
        -""".strip()+"\n" - - if len(info) == 0: - message = "Nothing found in the image." - info = f"

        {message}

        " - - return (a,c,res,info) - - -def main(): - args = parse_args() - model = load_model() - labels = load_labels() - - func = functools.partial(predict, model=model, labels=labels) - func = functools.update_wrapper(func, predict) - - gr.Interface( - func, - [ - gr.inputs.Image(type='pil', label='Input'), - gr.inputs.Slider(0, - 1, - step=args.score_slider_step, - default=args.score_threshold, - label='Score Threshold'), - ], - [ - gr.outputs.Textbox(label='Output (string)'), - gr.outputs.Textbox(label='Output (raw string)'), - gr.outputs.Label(label='Output (label)'), - gr.outputs.HTML() - ], - examples=[ - ['miku.jpg',0.5], - ['miku2.jpg',0.5] - ], - title=TITLE, - description=''' -Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer. -Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru) -PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - ''', - theme=args.theme, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/tzafrir/formajourney/app.py b/spaces/tzafrir/formajourney/app.py deleted file mode 100644 index 95171db67c4d6eab6f061b02a6c8667a84ceda02..0000000000000000000000000000000000000000 --- a/spaces/tzafrir/formajourney/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio as gr -from PIL import Image -import requests -from io import BytesIO -import time -import random - -def get_static_image(ai_prompt): - # Delay for 4-6 seconds - time.sleep(random.uniform(2, 6)) - - url = "https://scontent.fhfa2-2.fna.fbcdn.net/v/t1.6435-9/106507268_1738964529575143_6171887621612874617_n.jpg?stp=dst-jpg_p843x403&_nc_cat=105&ccb=1-7&_nc_sid=8bfeb9&_nc_ohc=EKYgXpVrvycAX83Mxhk&_nc_ht=scontent.fhfa2-2.fna&oh=00_AfCMHxWwL9ZWkgcr4Owk0RWcP7-6ogkupHpTLT4UQ38U4A&oe=64C38A56" - response = requests.get(url) - img = Image.open(BytesIO(response.content)) - - return img - -iface = gr.Interface(fn=get_static_image, inputs="text", outputs=gr.outputs.Image(type="pil")) - -iface.launch() diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/BS.Player PRO 2.68 Build 1077 Final Keys [ATOM] .rar.md b/spaces/usbethFlerru/sovits-modelsV2/example/BS.Player PRO 2.68 Build 1077 Final Keys [ATOM] .rar.md deleted file mode 100644 index 3689b18cb33f85a7488e7931b841983484e9e7a6..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/BS.Player PRO 2.68 Build 1077 Final Keys [ATOM] .rar.md +++ /dev/null @@ -1,5 +0,0 @@ - -

        5bd35b6a26 Full Italian Plays Songs 720p (English) Blogs Torrents Watch The Kite Runner Movie Free Torrents Hd Mp4 Programmer 4981a40d BILLY LANGE 2010 DIGITAL FULL PRICE EN Audio +Video 2015 VfWEXK4R826 Full Version Crack Windows 2020 All Of Taskbar Functions Keys Download For Pc Software Convert Files Full Version Intel Wi-Fi.rar Full Version Download Mac Task Manager Pro Freeware Programmer 630a8a5b The Abigail And The John Henry Stories Mp3 Temp Download Scrabble Word Finder X32 Ultimate.rar METROPOLIS SISTERS UNDERGROUND 3 FULL SEASON 2 COMPLETE DISC ZIP FULL EPISODE GXM-K5P_P3G Free Cloud 2.52B KVM VirtualBox 6.0.4 Serial.rar If I Stay And I Hope You Do Movie Free Download Full Zip Hd 720p Version Ms Windows XP Pro RAR Cracked CCleaner 18 Serial.zip 31pk ISO Hidden Passage Of Time Hack Software Download Dmwzn422 Full Crack [REPACK] Offshore.rar Fargate Balancing Work And Life In The World Of The City Premier Edition DVD Only Image Name Bn Nude 420 Full Version Keys [FREE] DtaInspector Joá Pickhardt Bibliography Written by Joáo Henrique de Campos Ribeiro. Dissemination: Cite: 25.3540. Copy Citation: 25.3540 Language: Title: Inspector Joá Pickhardt Published: 30-11-2004 Abstract: We present and discuss a total solar eclipse captured with an Apogee apodisation (Apogee) coronagraph on board of ESO's Very Large Telescope (VLT) on July 1st 2004. The coronal eclipse is obtained from a partial eclipse of the corona (Moon and Sun) recorded by an adaptive optics system operating on the VLT, which significantly improves the quality of the images. Our coronagraph experiments over a two-hours time period starting at 11:09UT. The location of the Moon is tracked using the SRIT-sats (Sun-Relief Imaging of Thesolar Transit) and centred at 02:05UT. The eclipse is recorded using a CCD fast cadence of 50ms. The solar corona was recorded through a very narrow annulus (opening angle of 2.5" with a width of 1.5") with a large central occulting ring (1.3m in diameter) centred on the Moon. The small eclipse is recorded using an occulting mask to sample a very small annulus (opening angle of 2.7" with a width of 0.6"). The on-sky FOV for the coronagraph is determined to be less than 1 arcminute in radius. Spectral data is obtained with a radial scan of the annuli with the VIMS instruments for the eclipse in the Ã(335-347nm) and the Ã(363-392nm) spectral ranges, and at the limb for a longer wavelength data set (482-505nm). An analysis of both data sets is performed to determine the speed and direction of the transiting Moon during the course of the eclipse.Evaluation of uremic cardiomyopathy in patients with end-stage renal disease.

        -

        BS.Player PRO 2.68 Build 1077 Final Keys [ATOM] .rar


        Download Ziphttps://urlcod.com/2uyXNg



        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/errors.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/errors.md deleted file mode 100644 index f3ddfc41f72c438826d54ff4f241e88e3751ec74..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/errors.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -description: Learn about HUBModelError in Ultralytics YOLO Docs. Resolve the error and get the most out of your YOLO model. -keywords: HUBModelError, Ultralytics YOLO, YOLO Documentation, Object detection errors, YOLO Errors, HUBModelError Solutions ---- - -## HUBModelError ---- -### ::: ultralytics.yolo.utils.errors.HUBModelError -

        diff --git a/spaces/vict0rsch/climateGAN/figures/metrics_onefig.py b/spaces/vict0rsch/climateGAN/figures/metrics_onefig.py deleted file mode 100644 index d9d372dcbb1bed2fffbfd8e81d6da749ceab730b..0000000000000000000000000000000000000000 --- a/spaces/vict0rsch/climateGAN/figures/metrics_onefig.py +++ /dev/null @@ -1,772 +0,0 @@ -""" -This scripts plots examples of the images that get best and worse metrics -""" -print("Imports...", end="") -import os -import sys -from argparse import ArgumentParser -from pathlib import Path - -import matplotlib.patches as mpatches -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sns -import yaml -from imageio import imread -from matplotlib.gridspec import GridSpec -from skimage.color import rgba2rgb -from sklearn.metrics.pairwise import euclidean_distances - -sys.path.append("../") - -from climategan.data import encode_mask_label -from climategan.eval_metrics import edges_coherence_std_min -from eval_masker import crop_and_resize - -# ----------------------- -# ----- Constants ----- -# ----------------------- - -# Metrics -metrics = ["error", "f05", "edge_coherence"] - -dict_metrics = { - "names": { - "tpr": "TPR, Recall, Sensitivity", - "tnr": "TNR, Specificity, Selectivity", - "fpr": "FPR", - "fpt": "False positives relative to image size", - "fnr": "FNR, Miss rate", - "fnt": "False negatives relative to image size", - "mpr": "May positive rate (MPR)", - "mnr": "May negative rate (MNR)", - "accuracy": "Accuracy (ignoring may)", - "error": "Error", - "f05": "F05 score", - "precision": "Precision", - "edge_coherence": "Edge coherence", - "accuracy_must_may": "Accuracy (ignoring cannot)", - }, - "key_metrics": ["error", "f05", "edge_coherence"], -} - - -# Colors -colorblind_palette = sns.color_palette("colorblind") -color_cannot = colorblind_palette[1] -color_must = colorblind_palette[2] -color_may = colorblind_palette[7] -color_pred = colorblind_palette[4] - -icefire = sns.color_palette("icefire", as_cmap=False, n_colors=5) -color_tp = icefire[0] -color_tn = icefire[1] -color_fp = icefire[4] -color_fn = icefire[3] - - -def parsed_args(): - """ - Parse and returns command-line args - - Returns: - argparse.Namespace: the parsed arguments - """ - parser = ArgumentParser() - parser.add_argument( - "--input_csv", - default="ablations_metrics_20210311.csv", - type=str, - help="CSV containing the results of the ablation study", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - help="Output directory", - ) - parser.add_argument( - "--models_log_path", - default=None, - type=str, - help="Path containing the log files of the models", - ) - parser.add_argument( - "--masker_test_set_dir", - default=None, - type=str, - help="Directory containing the test images", - ) - parser.add_argument( - "--best_model", - default="dada, msd_spade, pseudo", - type=str, - help="The string identifier of the best model", - ) - parser.add_argument( - "--dpi", - default=200, - type=int, - help="DPI for the output images", - ) - parser.add_argument( - "--alpha", - default=0.5, - type=float, - help="Transparency of labels shade", - ) - parser.add_argument( - "--percentile", - default=0.05, - type=float, - help="Transparency of labels shade", - ) - parser.add_argument( - "--seed", - default=None, - type=int, - help="Bootstrap random seed, for reproducibility", - ) - parser.add_argument( - "--no_images", - action="store_true", - default=False, - help="Do not generate images", - ) - - return parser.parse_args() - - 
-def map_color(arr, input_color, output_color, rtol=1e-09): - """ - Maps one color to another - """ - input_color_arr = np.tile(input_color, (arr.shape[:2] + (1,))) - output = arr.copy() - output[np.all(np.isclose(arr, input_color_arr, rtol=rtol), axis=2)] = output_color - return output - - -def plot_labels(ax, img, label, img_id, n_, add_title, do_legend): - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (255, 0, 0), color_cannot) - label_colmap = map_color(label_colmap, (0, 0, 255), color_must) - label_colmap = map_color(label_colmap, (0, 0, 0), color_may) - - ax.imshow(img) - ax.imshow(label_colmap, alpha=0.5) - ax.axis("off") - - if n_ in [1, 3, 5]: - color_ = "green" - else: - color_ = "red" - - ax.text( - -0.15, - 0.5, - img_id, - color=color_, - fontweight="roman", - fontsize="x-large", - horizontalalignment="left", - verticalalignment="center", - transform=ax.transAxes, - ) - - if add_title: - ax.set_title("Labels", rotation=0, fontsize="x-large") - - -def plot_pred(ax, img, pred, img_id, add_title, do_legend): - pred = np.tile(np.expand_dims(pred, axis=2), reps=(1, 1, 3)) - - pred_colmap = pred.astype(float) - pred_colmap = map_color(pred_colmap, (1, 1, 1), color_pred) - pred_colmap_ma = np.ma.masked_not_equal(pred_colmap, color_pred) - pred_colmap_ma = pred_colmap_ma.mask * img + pred_colmap_ma - - ax.imshow(img) - ax.imshow(pred_colmap_ma, alpha=0.5) - ax.axis("off") - - if add_title: - ax.set_title("Prediction", rotation=0, fontsize="x-large") - - -def plot_correct_incorrect( - ax, img_filename, img, metric, label, img_id, n_, add_title, do_legend -): - # FP - fp_map = imread( - model_path / "eval-metrics/fp" / "{}_fp.png".format(Path(img_filename).stem) - ) - fp_map = np.tile(np.expand_dims(fp_map, axis=2), reps=(1, 1, 3)) - - fp_map_colmap = fp_map.astype(float) - fp_map_colmap = map_color(fp_map_colmap, (1, 1, 1), color_fp) - - # FN - fn_map = imread( - model_path / "eval-metrics/fn" / "{}_fn.png".format(Path(img_filename).stem) - ) - fn_map = np.tile(np.expand_dims(fn_map, axis=2), reps=(1, 1, 3)) - - fn_map_colmap = fn_map.astype(float) - fn_map_colmap = map_color(fn_map_colmap, (1, 1, 1), color_fn) - - # TP - tp_map = imread( - model_path / "eval-metrics/tp" / "{}_tp.png".format(Path(img_filename).stem) - ) - tp_map = np.tile(np.expand_dims(tp_map, axis=2), reps=(1, 1, 3)) - - tp_map_colmap = tp_map.astype(float) - tp_map_colmap = map_color(tp_map_colmap, (1, 1, 1), color_tp) - - # TN - tn_map = imread( - model_path / "eval-metrics/tn" / "{}_tn.png".format(Path(img_filename).stem) - ) - tn_map = np.tile(np.expand_dims(tn_map, axis=2), reps=(1, 1, 3)) - - tn_map_colmap = tn_map.astype(float) - tn_map_colmap = map_color(tn_map_colmap, (1, 1, 1), color_tn) - - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (0, 0, 0), color_may) - label_colmap_ma = np.ma.masked_not_equal(label_colmap, color_may) - label_colmap_ma = label_colmap_ma.mask * img + label_colmap_ma - - # Combine masks - maps = fp_map_colmap + fn_map_colmap + tp_map_colmap + tn_map_colmap - maps_ma = np.ma.masked_equal(maps, (0, 0, 0)) - maps_ma = maps_ma.mask * img + maps_ma - - ax.imshow(img) - ax.imshow(label_colmap_ma, alpha=0.5) - ax.imshow(maps_ma, alpha=0.5) - ax.axis("off") - - if add_title: - ax.set_title("Metric", rotation=0, fontsize="x-large") - - -def plot_edge_coherence(ax, img, metric, label, pred, img_id, n_, add_title, do_legend): - pred = np.tile(np.expand_dims(pred, axis=2), reps=(1, 1, 3)) - - ec, pred_ec, label_ec = 
edges_coherence_std_min( - np.squeeze(pred[:, :, 0]), np.squeeze(encode_mask_label(label, "flood")) - ) - - ################## - # Edge distances # - ################## - - # Location of edges - pred_ec_coord = np.argwhere(pred_ec > 0) - label_ec_coord = np.argwhere(label_ec > 0) - - # Normalized pairwise distances between pred and label - dist_mat = np.divide( - euclidean_distances(pred_ec_coord, label_ec_coord), pred_ec.shape[0] - ) - - # Standard deviation of the minimum distance from pred to label - min_dist = np.min(dist_mat, axis=1) # noqa: F841 - - ############# - # Make plot # - ############# - - pred_ec = np.tile( - np.expand_dims(np.asarray(pred_ec > 0, dtype=float), axis=2), reps=(1, 1, 3) - ) - pred_ec_colmap = map_color(pred_ec, (1, 1, 1), color_pred) - pred_ec_colmap_ma = np.ma.masked_not_equal(pred_ec_colmap, color_pred) # noqa: F841 - - label_ec = np.tile( - np.expand_dims(np.asarray(label_ec > 0, dtype=float), axis=2), reps=(1, 1, 3) - ) - label_ec_colmap = map_color(label_ec, (1, 1, 1), color_must) - label_ec_colmap_ma = np.ma.masked_not_equal( # noqa: F841 - label_ec_colmap, color_must - ) - - # Combined pred and label edges - combined_ec = pred_ec_colmap + label_ec_colmap - combined_ec_ma = np.ma.masked_equal(combined_ec, (0, 0, 0)) - combined_ec_img = combined_ec_ma.mask * img + combined_ec - - # Pred - pred_colmap = pred.astype(float) - pred_colmap = map_color(pred_colmap, (1, 1, 1), color_pred) - pred_colmap_ma = np.ma.masked_not_equal(pred_colmap, color_pred) - - # Must - label_colmap = label.astype(float) - label_colmap = map_color(label_colmap, (0, 0, 255), color_must) - label_colmap_ma = np.ma.masked_not_equal(label_colmap, color_must) - - # TP - tp_map = imread( - model_path / "eval-metrics/tp" / "{}_tp.png".format(Path(srs_sel.filename).stem) - ) - tp_map = np.tile(np.expand_dims(tp_map, axis=2), reps=(1, 1, 3)) - tp_map_colmap = tp_map.astype(float) - tp_map_colmap = map_color(tp_map_colmap, (1, 1, 1), color_tp) - tp_map_colmap_ma = np.ma.masked_not_equal(tp_map_colmap, color_tp) - - # Combination - comb_pred = ( - (pred_colmap_ma.mask ^ tp_map_colmap_ma.mask) - & tp_map_colmap_ma.mask - & combined_ec_ma.mask - ) * pred_colmap - comb_label = ( - (label_colmap_ma.mask ^ pred_colmap_ma.mask) - & pred_colmap_ma.mask - & combined_ec_ma.mask - ) * label_colmap - comb_tp = combined_ec_ma.mask * tp_map_colmap.copy() - combined = comb_tp + comb_label + comb_pred - combined_ma = np.ma.masked_equal(combined, (0, 0, 0)) - combined_ma = combined_ma.mask * combined_ec_img + combined_ma - - ax.imshow(combined_ec_img, alpha=1) - ax.imshow(combined_ma, alpha=0.5) - ax.axis("off") - - # Plot lines - idx_sort_x = np.argsort(pred_ec_coord[:, 1]) - offset = 100 - for idx in range(offset, pred_ec_coord.shape[0], offset): - y0, x0 = pred_ec_coord[idx_sort_x[idx], :] - argmin = np.argmin(dist_mat[idx_sort_x[idx]]) - y1, x1 = label_ec_coord[argmin, :] - ax.plot([x0, x1], [y0, y1], color="white", linewidth=0.5) - - if add_title: - ax.set_title("Metric", rotation=0, fontsize="x-large") - - -def plot_images_metric( - axes, metric, img_filename, img_id, n_, srs_sel, add_title, do_legend -): - - # Read images - img_path = imgs_orig_path / img_filename - label_path = labels_path / "{}_labeled.png".format(Path(img_filename).stem) - img, label = crop_and_resize(img_path, label_path) - img = rgba2rgb(img) if img.shape[-1] == 4 else img / 255.0 - - pred = imread( - model_path / "eval-metrics/pred" / "{}_pred.png".format(Path(img_filename).stem) - ) - - # Label - plot_labels(axes[0], img, 
label, img_id, n_, add_title, do_legend) - - # Prediction - plot_pred(axes[1], img, pred, img_id, add_title, do_legend) - - # Correct / incorrect - if metric in ["error", "f05"]: - plot_correct_incorrect( - axes[2], - img_filename, - img, - metric, - label, - img_id, - n_, - add_title, - do_legend=False, - ) - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_tp, label="TP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_tn, label="TN", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_fp, label="FP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_fn, label="FN", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch( - facecolor=color_may, - label="May-be-flooded", - linewidth=lw, - alpha=0.66, - ) - ) - labels = ["TP", "TN", "FP", "FN", "May-be-flooded"] - if metric == "error": - if n_ in [1, 3, 5]: - title = "Low error rate" - else: - title = "High error rate" - else: - if n_ in [1, 3, 5]: - title = "High F05 score" - else: - title = "Low F05 score" - # Edge coherence - elif metric == "edge_coherence": - plot_edge_coherence( - axes[2], img, metric, label, pred, img_id, n_, add_title, do_legend=False - ) - handles = [] - lw = 1.0 - handles.append( - mpatches.Patch(facecolor=color_tp, label="TP", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch(facecolor=color_pred, label="pred", linewidth=lw, alpha=0.66) - ) - handles.append( - mpatches.Patch( - facecolor=color_must, - label="Must-be-flooded", - linewidth=lw, - alpha=0.66, - ) - ) - labels = ["TP", "Prediction", "Must-be-flooded"] - if n_ in [1, 3, 5]: - title = "High edge coherence" - else: - title = "Low edge coherence" - - else: - raise ValueError - - labels_values_title = "Error: {:.4f} \nFO5: {:.4f} \nEdge coherence: {:.4f}".format( - srs_sel.error, srs_sel.f05, srs_sel.edge_coherence - ) - - plot_legend(axes[3], img, handles, labels, labels_values_title, title) - - -def plot_legend(ax, img, handles, labels, labels_values_title, title): - img_ = np.zeros_like(img, dtype=np.uint8) - img_.fill(255) - ax.imshow(img_) - ax.axis("off") - - leg1 = ax.legend( - handles=handles, - labels=labels, - title=title, - title_fontsize="medium", - labelspacing=0.6, - loc="upper left", - fontsize="x-small", - frameon=False, - ) - leg1._legend_box.align = "left" - - leg2 = ax.legend( - title=labels_values_title, - title_fontsize="small", - loc="lower left", - frameon=False, - ) - leg2._legend_box.align = "left" - - ax.add_artist(leg1) - - -def scatterplot_metrics_pair(ax, df, x_metric, y_metric, dict_images): - - sns.scatterplot(data=df, x=x_metric, y=y_metric, ax=ax) - - # Set X-label - ax.set_xlabel(dict_metrics["names"][x_metric], rotation=0, fontsize="medium") - - # Set Y-label - ax.set_ylabel(dict_metrics["names"][y_metric], rotation=90, fontsize="medium") - - # Change spines - sns.despine(ax=ax, left=True, bottom=True) - - annotate_scatterplot(ax, dict_images, x_metric, y_metric) - - -def scatterplot_metrics(ax, df, df_all, dict_images, plot_all=False): - - # Other - if plot_all: - sns.scatterplot( - data=df_all.loc[df_all.ground == True], - x="error", y="f05", hue="edge_coherence", ax=ax, - marker='+', alpha=0.25) - sns.scatterplot( - data=df_all.loc[df_all.instagan == True], - x="error", y="f05", hue="edge_coherence", ax=ax, - marker='x', alpha=0.25) - sns.scatterplot( - data=df_all.loc[(df_all.instagan == False) & (df_all.instagan == False) & - (df_all.model_feats != args.best_model)], - 
x="error", y="f05", hue="edge_coherence", ax=ax, - marker='s', alpha=0.25) - - # Best model - cmap_ = sns.cubehelix_palette(as_cmap=True) - sns.scatterplot( - data=df, x="error", y="f05", hue="edge_coherence", ax=ax, palette=cmap_ - ) - - norm = plt.Normalize(df["edge_coherence"].min(), df["edge_coherence"].max()) - sm = plt.cm.ScalarMappable(cmap=cmap_, norm=norm) - sm.set_array([]) - - # Remove the legend and add a colorbar - ax.get_legend().remove() - ax_cbar = ax.figure.colorbar(sm) - ax_cbar.set_label("Edge coherence", labelpad=8) - - # Set X-label - ax.set_xlabel(dict_metrics["names"]["error"], rotation=0, fontsize="medium") - - # Set Y-label - ax.set_ylabel(dict_metrics["names"]["f05"], rotation=90, fontsize="medium") - - annotate_scatterplot(ax, dict_images, "error", "f05") - - # Change spines - sns.despine(ax=ax, left=True, bottom=True) - - # Set XY limits - xlim = ax.get_xlim() - ylim = ax.get_ylim() - ax.set_xlim([0.0, xlim[1]]) - ax.set_ylim([ylim[0], 1.0]) - - -def annotate_scatterplot(ax, dict_images, x_metric, y_metric, offset=0.1): - xlim = ax.get_xlim() - ylim = ax.get_ylim() - x_len = xlim[1] - xlim[0] - y_len = ylim[1] - ylim[0] - x_th = xlim[1] - x_len / 2.0 - y_th = ylim[1] - y_len / 2.0 - for text, d in dict_images.items(): - if text in ["B", "D", "F"]: - x = d[x_metric] - y = d[y_metric] - - x_text = x + x_len * offset if x < x_th else x - x_len * offset - y_text = y + y_len * offset if y < y_th else y - y_len * offset - - ax.annotate( - xy=(x, y), - xycoords="data", - xytext=(x_text, y_text), - textcoords="data", - text=text, - arrowprops=dict(facecolor="black", shrink=0.05), - fontsize="medium", - color="black", - ) - elif text == "A": - x = ( - dict_images["A"][x_metric] - + dict_images["C"][x_metric] - + dict_images["E"][x_metric] - ) / 3 - y = ( - dict_images["A"][y_metric] - + dict_images["C"][y_metric] - + dict_images["E"][y_metric] - ) / 3 - - x_text = x + x_len * 2 * offset if x < x_th else x - x_len * 2 * offset - y_text = ( - y + y_len * 0.45 * offset if y < y_th else y - y_len * 0.45 * offset - ) - - ax.annotate( - xy=(x, y), - xycoords="data", - xytext=(x_text, y_text), - textcoords="data", - text="A, C, E", - arrowprops=dict(facecolor="black", shrink=0.05), - fontsize="medium", - color="black", - ) - - -if __name__ == "__main__": - # ----------------------------- - # ----- Parse arguments ----- - # ----------------------------- - args = parsed_args() - print("Args:\n" + "\n".join([f" {k:20}: {v}" for k, v in vars(args).items()])) - - # Determine output dir - if args.output_dir is None: - output_dir = Path(os.environ["SLURM_TMPDIR"]) - else: - output_dir = Path(args.output_dir) - if not output_dir.exists(): - output_dir.mkdir(parents=True, exist_ok=False) - - # Store args - output_yml = output_dir / "labels.yml" - with open(output_yml, "w") as f: - yaml.dump(vars(args), f) - - # Data dirs - imgs_orig_path = Path(args.masker_test_set_dir) / "imgs" - labels_path = Path(args.masker_test_set_dir) / "labels" - - # Read CSV - df_all = pd.read_csv(args.input_csv, index_col="model_img_idx") - - # Select best model - df = df_all.loc[df_all.model_feats == args.best_model] - v_key, model_dir = df.model.unique()[0].split("/") - model_path = Path(args.models_log_path) / "ablation-{}".format(v_key) / model_dir - - # Set up plot - sns.reset_orig() - sns.set(style="whitegrid") - plt.rcParams.update({"font.family": "serif"}) - plt.rcParams.update( - { - "font.serif": [ - "Computer Modern Roman", - "Times New Roman", - "Utopia", - "New Century Schoolbook", - "Century 
Schoolbook L", - "ITC Bookman", - "Bookman", - "Times", - "Palatino", - "Charter", - "serif" "Bitstream Vera Serif", - "DejaVu Serif", - ] - } - ) - - if args.seed: - np.random.seed(args.seed) - img_ids = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - dict_images = {} - idx = 0 - - # Define grid of subplots - grid_vmargin = 0.03 # Extent of the vertical margin between metric grids - ax_hspace = 0.04 # Extent of the vertical space between axes of same grid - ax_wspace = 0.05 # Extent of the horizontal space between axes of same grid - n_grids = len(metrics) - n_cols = 4 - n_rows = 2 - h_grid = (1.0 / n_grids) - ((n_grids - 1) * grid_vmargin) / n_grids - - fig1 = plt.figure(dpi=200, figsize=(11, 13)) - - n_ = 0 - add_title = False - for metric_id, metric in enumerate(metrics): - - # Create grid - top_grid = 1.0 - metric_id * h_grid - metric_id * grid_vmargin - bottom_grid = top_grid - h_grid - gridspec = GridSpec( - n_rows, - n_cols, - wspace=ax_wspace, - hspace=ax_hspace, - bottom=bottom_grid, - top=top_grid, - ) - - # Select best - if metric == "error": - ascending = True - else: - ascending = False - idx_rand = np.random.permutation(int(args.percentile * len(df)))[0] - srs_sel = df.sort_values(by=metric, ascending=ascending).iloc[idx_rand] - img_id = img_ids[idx] - dict_images.update({img_id: srs_sel}) - # Read images - img_filename = srs_sel.filename - - axes_row = [fig1.add_subplot(gridspec[0, c]) for c in range(n_cols)] - if not args.no_images: - n_ += 1 - if metric_id == 0: - add_title = True - plot_images_metric( - axes_row, - metric, - img_filename, - img_id, - n_, - srs_sel, - add_title=add_title, - do_legend=False, - ) - add_title = False - - idx += 1 - print("1 more row done.") - # Select worst - if metric == "error": - ascending = False - else: - ascending = True - idx_rand = np.random.permutation(int(args.percentile * len(df)))[0] - srs_sel = df.sort_values(by=metric, ascending=ascending).iloc[idx_rand] - img_id = img_ids[idx] - dict_images.update({img_id: srs_sel}) - # Read images - img_filename = srs_sel.filename - - axes_row = [fig1.add_subplot(gridspec[1, c]) for c in range(n_cols)] - if not args.no_images: - n_ += 1 - plot_images_metric( - axes_row, - metric, - img_filename, - img_id, - n_, - srs_sel, - add_title=add_title, - do_legend=False, - ) - - idx += 1 - print("1 more row done.") - - output_fig = output_dir / "all_metrics.png" - - fig1.tight_layout() # (pad=1.5) # - fig1.savefig(output_fig, dpi=fig1.dpi, bbox_inches="tight") - - # Scatter plot - fig2 = plt.figure(dpi=200) - - scatterplot_metrics(fig2.gca(), df, df_all, dict_images) - - # fig2, axes = plt.subplots(nrows=1, ncols=3, dpi=200, figsize=(18, 5)) - # - # scatterplot_metrics_pair(axes[0], df, "error", "f05", dict_images) - # scatterplot_metrics_pair(axes[1], df, "error", "edge_coherence", dict_images) - # scatterplot_metrics_pair(axes[2], df, "f05", "edge_coherence", dict_images) - - output_fig = output_dir / "scatterplots.png" - fig2.savefig(output_fig, dpi=fig2.dpi, bbox_inches="tight") diff --git a/spaces/vivym/image-matting-app/ppmatting/models/dim.py b/spaces/vivym/image-matting-app/ppmatting/models/dim.py deleted file mode 100644 index 5d9ae654322242f785407e61ff7b8405d6b443b4..0000000000000000000000000000000000000000 --- a/spaces/vivym/image-matting-app/ppmatting/models/dim.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import defaultdict -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -from paddleseg.models import layers -from paddleseg import utils -from paddleseg.cvlibs import manager - -from ppmatting.models.losses import MRSD - - -@manager.MODELS.add_component -class DIM(nn.Layer): - """ - The DIM implementation based on PaddlePaddle. - - The original article refers to - Ning Xu, et, al. "Deep Image Matting" - (https://arxiv.org/pdf/1908.07919.pdf). - - Args: - backbone: backbone model. - stage (int, optional): The stage of model. Defautl: 3. - decoder_input_channels(int, optional): The channel of decoder input. Default: 512. - pretrained(str, optional): The path of pretrianed model. Defautl: None. - - """ - - def __init__(self, - backbone, - stage=3, - decoder_input_channels=512, - pretrained=None): - super().__init__() - self.backbone = backbone - self.pretrained = pretrained - self.stage = stage - self.loss_func_dict = None - - decoder_output_channels = [64, 128, 256, 512] - self.decoder = Decoder( - input_channels=decoder_input_channels, - output_channels=decoder_output_channels) - if self.stage == 2: - for param in self.backbone.parameters(): - param.stop_gradient = True - for param in self.decoder.parameters(): - param.stop_gradient = True - if self.stage >= 2: - self.refine = Refine() - self.init_weight() - - def forward(self, inputs): - input_shape = paddle.shape(inputs['img'])[-2:] - x = paddle.concat([inputs['img'], inputs['trimap'] / 255], axis=1) - fea_list = self.backbone(x) - - # decoder stage - up_shape = [] - for i in range(5): - up_shape.append(paddle.shape(fea_list[i])[-2:]) - alpha_raw = self.decoder(fea_list, up_shape) - alpha_raw = F.interpolate( - alpha_raw, input_shape, mode='bilinear', align_corners=False) - logit_dict = {'alpha_raw': alpha_raw} - if self.stage < 2: - return logit_dict - - if self.stage >= 2: - # refine stage - refine_input = paddle.concat([inputs['img'], alpha_raw], axis=1) - alpha_refine = self.refine(refine_input) - - # finally alpha - alpha_pred = alpha_refine + alpha_raw - alpha_pred = F.interpolate( - alpha_pred, input_shape, mode='bilinear', align_corners=False) - if not self.training: - alpha_pred = paddle.clip(alpha_pred, min=0, max=1) - logit_dict['alpha_pred'] = alpha_pred - if self.training: - loss_dict = self.loss(logit_dict, inputs) - return logit_dict, loss_dict - else: - return alpha_pred - - def loss(self, logit_dict, label_dict, loss_func_dict=None): - if loss_func_dict is None: - if self.loss_func_dict is None: - self.loss_func_dict = defaultdict(list) - self.loss_func_dict['alpha_raw'].append(MRSD()) - self.loss_func_dict['comp'].append(MRSD()) - self.loss_func_dict['alpha_pred'].append(MRSD()) - else: - self.loss_func_dict = loss_func_dict - - loss = {} - mask = label_dict['trimap'] == 128 - loss['all'] = 0 - - if self.stage != 2: - loss['alpha_raw'] = self.loss_func_dict['alpha_raw'][0]( - logit_dict['alpha_raw'], label_dict['alpha'], mask) - loss['alpha_raw'] = 0.5 * loss['alpha_raw'] - loss['all'] = loss['all'] + loss['alpha_raw'] - - if self.stage == 1 or self.stage == 3: - comp_pred 
= logit_dict['alpha_raw'] * label_dict['fg'] + \ - (1 - logit_dict['alpha_raw']) * label_dict['bg'] - loss['comp'] = self.loss_func_dict['comp'][0]( - comp_pred, label_dict['img'], mask) - loss['comp'] = 0.5 * loss['comp'] - loss['all'] = loss['all'] + loss['comp'] - - if self.stage == 2 or self.stage == 3: - loss['alpha_pred'] = self.loss_func_dict['alpha_pred'][0]( - logit_dict['alpha_pred'], label_dict['alpha'], mask) - loss['all'] = loss['all'] + loss['alpha_pred'] - - return loss - - def init_weight(self): - if self.pretrained is not None: - utils.load_entire_model(self, self.pretrained) - - -# bilinear interpolate skip connect -class Up(nn.Layer): - def __init__(self, input_channels, output_channels): - super().__init__() - self.conv = layers.ConvBNReLU( - input_channels, - output_channels, - kernel_size=5, - padding=2, - bias_attr=False) - - def forward(self, x, skip, output_shape): - x = F.interpolate( - x, size=output_shape, mode='bilinear', align_corners=False) - x = x + skip - x = self.conv(x) - x = F.relu(x) - - return x - - -class Decoder(nn.Layer): - def __init__(self, input_channels, output_channels=(64, 128, 256, 512)): - super().__init__() - self.deconv6 = nn.Conv2D( - input_channels, input_channels, kernel_size=1, bias_attr=False) - self.deconv5 = Up(input_channels, output_channels[-1]) - self.deconv4 = Up(output_channels[-1], output_channels[-2]) - self.deconv3 = Up(output_channels[-2], output_channels[-3]) - self.deconv2 = Up(output_channels[-3], output_channels[-4]) - self.deconv1 = Up(output_channels[-4], 64) - - self.alpha_conv = nn.Conv2D( - 64, 1, kernel_size=5, padding=2, bias_attr=False) - - def forward(self, fea_list, shape_list): - x = fea_list[-1] - x = self.deconv6(x) - x = self.deconv5(x, fea_list[4], shape_list[4]) - x = self.deconv4(x, fea_list[3], shape_list[3]) - x = self.deconv3(x, fea_list[2], shape_list[2]) - x = self.deconv2(x, fea_list[1], shape_list[1]) - x = self.deconv1(x, fea_list[0], shape_list[0]) - alpha = self.alpha_conv(x) - alpha = F.sigmoid(alpha) - - return alpha - - -class Refine(nn.Layer): - def __init__(self): - super().__init__() - self.conv1 = layers.ConvBNReLU( - 4, 64, kernel_size=3, padding=1, bias_attr=False) - self.conv2 = layers.ConvBNReLU( - 64, 64, kernel_size=3, padding=1, bias_attr=False) - self.conv3 = layers.ConvBNReLU( - 64, 64, kernel_size=3, padding=1, bias_attr=False) - self.alpha_pred = layers.ConvBNReLU( - 64, 1, kernel_size=3, padding=1, bias_attr=False) - - def forward(self, x): - x = self.conv1(x) - x = self.conv2(x) - x = self.conv3(x) - alpha = self.alpha_pred(x) - - return alpha diff --git a/spaces/wahaha/u2net_portrait/U-2-Net/utils/face_seg.py b/spaces/wahaha/u2net_portrait/U-2-Net/utils/face_seg.py deleted file mode 100644 index 4938ccebbb7e55f959889c1bf3bc2ca7dac7ffe6..0000000000000000000000000000000000000000 --- a/spaces/wahaha/u2net_portrait/U-2-Net/utils/face_seg.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import cv2 -import numpy as np -import tensorflow as tf -from tensorflow.python.platform import gfile - - -curPath = os.path.abspath(os.path.dirname(__file__)) - - -class FaceSeg: - def __init__(self, model_path=os.path.join(curPath, 'seg_model_384.pb')): - config = tf.compat.v1.ConfigProto() - config.gpu_options.allow_growth = True - self._graph = tf.Graph() - self._sess = tf.compat.v1.Session(config=config, graph=self._graph) - - self.pb_file_path = model_path - self._restore_from_pb() - self.input_op = self._sess.graph.get_tensor_by_name('input_1:0') - self.output_op = 
self._sess.graph.get_tensor_by_name('sigmoid/Sigmoid:0') - - def _restore_from_pb(self): - with self._sess.as_default(): - with self._graph.as_default(): - with gfile.FastGFile(self.pb_file_path, 'rb') as f: - graph_def = tf.compat.v1.GraphDef() - graph_def.ParseFromString(f.read()) - tf.import_graph_def(graph_def, name='') - - def input_transform(self, image): - image = cv2.resize(image, (384, 384), interpolation=cv2.INTER_AREA) - image_input = (image / 255.)[np.newaxis, :, :, :] - return image_input - - def output_transform(self, output, shape): - output = cv2.resize(output, (shape[1], shape[0])) - image_output = (output * 255).astype(np.uint8) - return image_output - - def get_mask(self, image): - image_input = self.input_transform(image) - output = self._sess.run(self.output_op, feed_dict={self.input_op: image_input})[0] - return self.output_transform(output, shape=image.shape[:2]) diff --git a/spaces/weibinke/vits-simple-api/vits/text/shanghainese.py b/spaces/weibinke/vits-simple-api/vits/text/shanghainese.py deleted file mode 100644 index eab92dcd22920a4bacd1d13721bb7cfaf2a3d667..0000000000000000000000000000000000000000 --- a/spaces/weibinke/vits-simple-api/vits/text/shanghainese.py +++ /dev/null @@ -1,66 +0,0 @@ -import re -import cn2an -import opencc -import config - -converter = opencc.OpenCC(config.ABS_PATH + '/chinese_dialect_lexicons/zaonhe') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ᴇ'), - ('B', 'bi'), - ('C', 'si'), - ('D', 'di'), - ('E', 'i'), - ('F', 'ᴇf'), - ('G', 'dʑi'), - ('H', 'ᴇtɕʰ'), - ('I', 'ᴀi'), - ('J', 'dʑᴇ'), - ('K', 'kʰᴇ'), - ('L', 'ᴇl'), - ('M', 'ᴇm'), - ('N', 'ᴇn'), - ('O', 'o'), - ('P', 'pʰi'), - ('Q', 'kʰiu'), - ('R', 'ᴀl'), - ('S', 'ᴇs'), - ('T', 'tʰi'), - ('U', 'ɦiu'), - ('V', 'vi'), - ('W', 'dᴀbɤliu'), - ('X', 'ᴇks'), - ('Y', 'uᴀi'), - ('Z', 'zᴇ') -]] - - -def _number_to_shanghainese(num): - num = cn2an.an2cn(num).replace('一十', '十').replace('二十', '廿').replace('二', '两') - return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) - - -def number_to_shanghainese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def shanghainese_to_ipa(text): - from vits.text.mandarin import symbols_to_chinese - text = symbols_to_chinese(text) - text = number_to_shanghainese(text.upper()) - text = converter.convert(text).replace('-', '').replace('$', ' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group()) + ' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/wenpeng/Sod_Inpaint/app.py b/spaces/wenpeng/Sod_Inpaint/app.py deleted file mode 100644 index 5db2b73b9a30911639523322e643c170ce3bd0fe..0000000000000000000000000000000000000000 --- a/spaces/wenpeng/Sod_Inpaint/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio as gr -import inpaint.infer_model as inpaint -import sod.infer_model as sod -import numpy as np -import torch -import glob -import cv2 -# import os -# cmd = 'sh download.sh' -# os.system(cmd) - -device = torch.device("cpu") -print(device) -inpaint_model = inpaint.IVModel(device=device) -sod_model = sod.IVModel(device=device) -max_size=512 -scale_factor = 8 -count = 0 -def sod_inpaint(img): - global count - h,w = img.shape[:2] - if max(h, w) > max_size: - if h < w: - h, w = int(max_size * h / w), max_size - else: - h, w = max_size, int(max_size * w / h) - h = h // scale_factor * scale_factor - w = w // scale_factor * scale_factor - img = cv2.resize(img, (w,h)) - img = img[:,:,::-1] - sod_res = sod_model.forward(img,None) - sod_res = np.uint8(sod_res) - h,w = sod_res.shape[:2] - so = np.uint8(sod_res[:,:w//2,:] * (sod_res[:,w//2:,:]>0).astype(np.float32)) - inpaint_res = inpaint_model.forward(sod_res,None) - inpaint_res = np.uint8(inpaint_res) - count +=1 - print(count, ' images have been processed') - return so[:,:,::-1], inpaint_res[:,:,::-1] - - - -examples = glob.glob('examples/*.*') -inputs = gr.inputs.Image(shape=(512,512), image_mode="RGB", invert_colors=False, source="upload", tool="editor", type="numpy", label=None, optional=False) -iface = gr.Interface(fn=sod_inpaint, inputs=inputs, outputs=["image", "image"], examples=examples, title='Salient Object Detection + Inpaint', description='Upload an image and you will see the fg and inpainted bg', theme='huggingface') -iface.launch() diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/losses.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/xiaoyeAI/clewd/Dockerfile b/spaces/xiaoyeAI/clewd/Dockerfile deleted file mode 100644 index f6ddf9ac542b5bd2f2d860ad9cef588013c8e902..0000000000000000000000000000000000000000 --- a/spaces/xiaoyeAI/clewd/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -# Use the official Node.js image as the base image -FROM node:20.4 - -# Set the working directory in the container -WORKDIR /app - -# Copy the package.json and package-lock.json files to the container -COPY package*.json ./ - -# Install the dependencies -RUN npm install --no-audit --fund false - -# Copy the rest of the files to the container -COPY . . - -# Change ownership of files in lib/bin and set permissions -RUN chown -R node:node lib/bin/* && \ - chmod u+x lib/bin/* && \ - chmod -R 777 /app - -# Run as the "node" user for better security practices -USER node - -RUN ls -la - -# Start the application -CMD ["node", "clewd.js"] diff --git a/spaces/xiaoyinqu/dreambooth/train_dreambooth.py b/spaces/xiaoyinqu/dreambooth/train_dreambooth.py deleted file mode 100644 index a496382fbc895961b9902c33a9d5cc926d4fcc8d..0000000000000000000000000000000000000000 --- a/spaces/xiaoyinqu/dreambooth/train_dreambooth.py +++ /dev/null @@ -1,881 +0,0 @@ -import argparse -import itertools -import math -import os -from pathlib import Path -from typing import Optional -import subprocess -import sys -import gc -import random - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch.utils.data import Dataset - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from huggingface_hub import HfFolder, Repository, whoami -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - - -logger = get_logger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - #required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - #required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - #required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default="", - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior 
preservation loss. If not have enough images, additional images will be" - " sampled with class_prompt." - ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" - ) - parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
- ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - - parser.add_argument( - "--save_n_steps", - type=int, - default=1, - help=("Save the model every n global_steps"), - ) - - - parser.add_argument( - "--save_starting_step", - type=int, - default=1, - help=("The step from which it starts saving intermediary checkpoints"), - ) - - parser.add_argument( - "--stop_text_encoder_training", - type=int, - default=1000000, - help=("The step at which the text_encoder is no longer trained"), - ) - - - parser.add_argument( - "--image_captions_filename", - action="store_true", - help="Get captions from filename", - ) - - - parser.add_argument( - "--dump_only_text_encoder", - action="store_true", - default=False, - help="Dump only text encoder", - ) - - parser.add_argument( - "--train_only_unet", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--cache_latents", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--Session_dir", - type=str, - default="", - help="Current session directory", - ) - - - - - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - #if args.instance_data_dir is None: - # raise ValueError("You must specify a train data directory.") - - #if args.with_prior_preservation: - # if args.class_data_dir is None: - # raise ValueError("You must specify a data directory for class images.") - # if args.class_prompt is None: - # raise ValueError("You must specify prompt for class images.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - args, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - self.image_captions_filename = None - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if args.image_captions_filename: - self.image_captions_filename = True - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - random.shuffle(self.class_images_path) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - path = self.instance_images_path[index % self.num_instance_images] - instance_image = Image.open(path) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - - instance_prompt = self.instance_prompt - - if self.image_captions_filename: - filename = Path(path).stem - pt=''.join([i for i in filename if not i.isdigit()]) - pt=pt.replace("_"," ") - pt=pt.replace("(","") - pt=pt.replace(")","") - pt=pt.replace("-","") - instance_prompt = pt - sys.stdout.write(" " +instance_prompt+" ") - sys.stdout.flush() - - - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - -class LatentsDataset(Dataset): - def __init__(self, latents_cache, text_encoder_cache): - self.latents_cache = latents_cache - self.text_encoder_cache = text_encoder_cache - - def __len__(self): - return len(self.latents_cache) - - def __getitem__(self, index): - return self.latents_cache[index], self.text_encoder_cache[index] - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - -def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict: - """ - Starts from base starting dict and then adds the remaining key values from updater replacing the values from - the first starting/base dict with the second updater dict. - - For later: how does d = {**d1, **d2} replace collision? - - :param starting_dict: - :param updater_dict: - :return: - """ - new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict - new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict - return new_dict - -def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace: - """ - - ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x - :param args1: - :param args2: - :return: - """ - # - the merged args - # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}. - merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2)) - args = argparse.Namespace(**merged_key_values_for_namespace) - return args - -def run_training(args_imported): - args_default = parse_args() - args = merge_args(args_default, args_imported) - print(args) - logging_dir = Path(args.output_dir, args.logging_dir) - i=args.save_starting_step - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - logging_dir=logging_dir, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
- ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, torch_dtype=torch_dtype - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - with torch.autocast("cuda"): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg") - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load models and create wrapper for stable diffusion - if args.train_only_unet: - if os.path.exists(str(args.output_dir+"/text_encoder_trained")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained") - elif os.path.exists(str(args.output_dir+"/text_encoder")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") - - vae.requires_grad_(False) - if not args.train_text_encoder: - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB 
GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - params_to_optimize = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler") - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - args=args, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - if args.train_text_encoder: - unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. 
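# A minimal stand-alone sketch of the same idea, using a hypothetical `sketch_frozen`
# module as a stand-in for the frozen VAE / text encoder (names and shapes are
# illustrative, not part of this script):
import torch

sketch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sketch_mixed_precision = "fp16"  # mirrors the --mixed_precision flag handled above
sketch_weight_dtype = {"fp16": torch.float16, "bf16": torch.bfloat16}.get(
    sketch_mixed_precision, torch.float32
)
sketch_frozen = torch.nn.Linear(4, 4).requires_grad_(False)  # frozen, inference-only module
sketch_frozen.to(sketch_device, dtype=sketch_weight_dtype)   # reduced precision is fine for frozen weights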
- vae.to(accelerator.device, dtype=weight_dtype) - if not args.train_text_encoder: - text_encoder.to(accelerator.device, dtype=weight_dtype) - - - if args.cache_latents: - latents_cache = [] - text_encoder_cache = [] - for batch in tqdm(train_dataloader, desc="Caching latents"): - with torch.no_grad(): - batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype) - batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True) - latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist) - if args.train_text_encoder: - text_encoder_cache.append(batch["input_ids"]) - else: - text_encoder_cache.append(text_encoder(batch["input_ids"])[0]) - train_dataset = LatentsDataset(latents_cache, text_encoder_cache) - train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True) - - del vae - #if not args.train_text_encoder: - # del text_encoder - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth", config=vars(args)) - - def bar(prg): - br='|'+'█' * prg + ' ' * (25-prg)+'|' - return br - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. 
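# The loop below leans on DDPMScheduler.add_noise, which applies the standard DDPM
# forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
# A minimal stand-alone sketch of that single step (tensor shapes are illustrative,
# not taken from this script):
import torch
from diffusers import DDPMScheduler

sketch_scheduler = DDPMScheduler(num_train_timesteps=1000)
sketch_latents = torch.randn(2, 4, 64, 64)       # stand-in for VAE latents
sketch_noise = torch.randn_like(sketch_latents)
sketch_timesteps = torch.randint(
    0, sketch_scheduler.config.num_train_timesteps, (sketch_latents.shape[0],)
)
sketch_noisy = sketch_scheduler.add_noise(sketch_latents, sketch_noise, sketch_timesteps)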
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - global_step = 0 - - for epoch in range(args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - # Convert images to latent space - with torch.no_grad(): - if args.cache_latents: - latents_dist = batch[0][0] - else: - latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist - latents = latents_dist.sample() * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - if(args.cache_latents): - if args.train_text_encoder: - encoder_hidden_states = text_encoder(batch[0][1])[0] - else: - encoder_hidden_states = batch[0][1] - else: - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if args.with_prior_preservation: - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. - model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() - - # Compute prior loss - prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
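# Put differently, with prior preservation the objective below is
#     loss = MSE(instance half) + prior_loss_weight * MSE(class half),
# where the class ("prior") half of the batch was concatenated in collate_fn from the
# images generated with --class_prompt. A toy illustration with made-up tensors
# (hypothetical names, not part of the training graph):
import torch
import torch.nn.functional as F

sketch_pred_inst, sketch_target_inst = torch.randn(2, 4), torch.randn(2, 4)
sketch_pred_prior, sketch_target_prior = torch.randn(2, 4), torch.randn(2, 4)
sketch_prior_weight = 1.0  # corresponds to --prior_loss_weight
sketch_loss = (
    F.mse_loss(sketch_pred_inst, sketch_target_inst)
    + sketch_prior_weight * F.mse_loss(sketch_pred_prior, sketch_target_prior)
)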
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) - if args.train_text_encoder - else unet.parameters() - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - fll=round((global_step*100)/args.max_train_steps) - fll=round(fll/4) - pr=bar(fll) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - progress_bar.set_description_str("Progress:"+pr) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30: - if accelerator.is_main_process: - print(" " +" Freezing the text_encoder ..."+" ") - frz_dir=args.output_dir + "/text_encoder_frozen" - if os.path.exists(frz_dir): - subprocess.call('rm -r '+ frz_dir, shell=True) - os.mkdir(frz_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(frz_dir) - - if args.save_n_steps >= 200: - if global_step < args.max_train_steps and global_step+1==i: - ckpt_name = "_step_" + str(global_step+1) - save_dir = Path(args.output_dir+ckpt_name) - save_dir=str(save_dir) - save_dir=save_dir.replace(" ", "_") - if not os.path.exists(save_dir): - os.mkdir(save_dir) - inst=save_dir[16:] - inst=inst.replace(" ", "_") - print(" SAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt") - # Create the pipeline using the trained modules and save it. - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(save_dir) - frz_dir=args.output_dir + "/text_encoder_frozen" - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True) - subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True) - chkpth=args.Session_dir+"/"+inst+".ckpt" - subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True) - subprocess.call('rm -r '+ save_dir, shell=True) - i=i+args.save_n_steps - - accelerator.wait_for_everyone() - - # Create the pipeline using using the trained modules and save it. 
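# Once the directory has been written below, the trained weights can be loaded back
# through the regular diffusers API. A minimal usage sketch (the path, prompt and
# identifier token are placeholders, not values produced by this script):
import torch
from diffusers import StableDiffusionPipeline

sketch_pipe = StableDiffusionPipeline.from_pretrained(
    "path/to/output_dir", torch_dtype=torch.float16
)
sketch_pipe = sketch_pipe.to("cuda")
sketch_image = sketch_pipe("a photo of sks person").images[0]
sketch_image.save("dreambooth_sample.png")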
- if accelerator.is_main_process: - if args.dump_only_text_encoder: - txt_dir=args.output_dir + "/text_encoder_trained" - if not os.path.exists(txt_dir): - os.mkdir(txt_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(txt_dir) - - elif args.train_only_unet: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(args.output_dir) - txt_dir=args.output_dir + "/text_encoder_trained" - subprocess.call('rm -r '+txt_dir, shell=True) - - else: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - frz_dir=args.output_dir + "/text_encoder_frozen" - pipeline.save_pretrained(args.output_dir) - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True) - subprocess.call('rm -r '+ frz_dir, shell=True) - - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - - accelerator.end_training() - del pipeline - torch.cuda.empty_cache() - gc.collect() -if __name__ == "__main__": - pass - #main() - diff --git a/spaces/xxxxxxianYu/vits-xxxxxxxxxxxxxxxxxx/commons.py b/spaces/xxxxxxianYu/vits-xxxxxxxxxxxxxxxxxx/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/xxxxxxianYu/vits-xxxxxxxxxxxxxxxxxx/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/yangogo/bingo/src/lib/bots/bing/sr.ts b/spaces/yangogo/bingo/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/yangy50/garbage-image-classification/app.py b/spaces/yangy50/garbage-image-classification/app.py deleted file mode 100644 index 408d98ef2a56288e17922d0142c9defe0947b274..0000000000000000000000000000000000000000 --- a/spaces/yangy50/garbage-image-classification/app.py +++ /dev/null @@ -1,91 +0,0 @@ 
-import streamlit as st -from PIL import Image - -from transformers import pipeline -import numpy as np -from transformers import AutoFeatureExtractor, AutoModelForImageClassification - - -st.set_page_config(layout='wide', - page_title='Garbage image classification' - ) - - - - -def main(): - - st.title("Garbage Classification") - st.markdown("## Overview") - st.markdown("### Backgroud") - st.markdown("Garbage classification refers to the separation of several types of different categories in accordance with the environmental impact of the use of the value of the composition of garbage components and the requirements of existing treatment methods.") - st.markdown("The significance of garbage classification: ") - st.markdown("1. Garbage classification reduces the mutual pollution between different garbage, which is beneficial to the recycling of materials. ") - st.markdown("2. Garbage classification is conducive to reducing the final waste disposal volume. ") - st.markdown("3. Garbage classification is conducive to enhancing the degree of social civilization.") - st.markdown("### Dataset") - st.markdown("The garbage classification dataset is from Kaggle. There are totally 2467 pictures in this dataset. And this model is an image classification model for this dataset. There are 6 classes for this dataset, which are cardboard (393), glass (491), metal (400), paper(584), plastic (472), and trash(127).") - st.markdown("### Model") - st.markdown("The model is based on the [ViT](https://huggingface.co/google/vit-base-patch16-224-in21k) model, which is short for the Vision Transformer. It was introduced in the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929), which was introduced in June 2021 by a team of researchers at Google Brain. And first released in [this repository](https://github.com/rwightman/pytorch-image-models). I trained this model with PyTorch. I think the most different thing between using the transformer to train on an image and on a text is in the tokenizing step. ") - st.markdown("There are 3 steps to tokenize the image:") - st.markdown("1. Split an image into a grid of sub-image patches") - st.markdown("2. Embed each patch with a linear projection") - st.markdown("3. Each embedded patch becomes a token, and the resulting sequence of embedded patches is the sequence you pass to the model.") - vit=Image.open("pic/vit-figure.jpg") - st.image(vit) - - st.markdown("I trained the model with 10 epochs, and I use Adam as the optimizer. The accuracy on the test set is 95%.") - st.markdown("## Huggingface Space") - st.markdown("The page here.") - st.markdown("## Huggingface Model Card") - st.markdown("Huggingface model card is [here](https://huggingface.co/yangy50/garbage-classification).") - st.markdown("## Critical Analysis") - st.markdown("1. Next step: build a CNN model on this dataset and compare the accuracy and training time for these two models.") - st.markdown("2. Didn’t use the Dataset package to store the image data. 
Want to find out how to use the Dataset package to handle image data.") - st.markdown("## Resource Links") - st.markdown("[vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k)") - st.markdown("[Garbage dataset](https://huggingface.co/cardiffnlp/twitter-roberta-base)") - st.markdown("[An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)") - st.markdown("## Git Repo") - st.markdown("[Code Demo](https://github.com/yuechen-yang/garbage-classification) ") - st.markdown("## Video Recording") - st.markdown("[Video](https://drive.google.com/file/d/1Kazf1WdYyf8fTNSfipGddPWu1x0skrw5/view?usp=sharing)") - - - st.header("Try it out!") - - uploaded_file = st.file_uploader("Upload Files",type=['png','jpeg','jpg']) - - if uploaded_file!=None: - - img=Image.open(uploaded_file) - - extractor = AutoFeatureExtractor.from_pretrained("yangy50/garbage-classification") - model = AutoModelForImageClassification.from_pretrained("yangy50/garbage-classification") - - inputs = extractor(img,return_tensors="pt") - outputs = model(**inputs) - label_num=outputs.logits.softmax(1).argmax(1) - label_num=label_num.item() - - st.write("The prediction class is:") - - if label_num==0: - st.write("cardboard") - elif label_num==1: - st.write("glass") - elif label_num==2: - st.write("metal") - elif label_num==3: - st.write("paper") - elif label_num==4: - st.write("plastic") - else: - st.write("trash") - - st.image(img) - - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/ArrangeView/ArrangeToolbar.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/ArrangeView/ArrangeToolbar.tsx deleted file mode 100644 index d884cb9daa3409aa63edab2fb61477c56d5647b0..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/ArrangeView/ArrangeToolbar.tsx +++ /dev/null @@ -1,56 +0,0 @@ -import styled from "@emotion/styled" -import { observer } from "mobx-react-lite" -import { FC, useCallback } from "react" -import { Localized } from "../../../components/Localized" -import { useStores } from "../../hooks/useStores" -import { AutoScrollButton } from "../Toolbar/AutoScrollButton" -import QuantizeSelector from "../Toolbar/QuantizeSelector/QuantizeSelector" -import { Toolbar } from "../Toolbar/Toolbar" - -const Title = styled.div` - font-weight: bold; - font-size: 1rem; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; - max-width: 14rem; - min-width: 3em; -` - -const FlexibleSpacer = styled.div` - flex-grow: 1; -` - -export const ArrangeToolbar: FC = observer(() => { - const { arrangeViewStore } = useStores() - const { quantize, autoScroll } = arrangeViewStore - - const onClickAutoScroll = useCallback( - () => (arrangeViewStore.autoScroll = !arrangeViewStore.autoScroll), - [arrangeViewStore], - ) - - const onSelectQuantize = useCallback( - (denominator: number) => (arrangeViewStore.quantize = denominator), - [arrangeViewStore], - ) - - return ( - - - <Localized default="Arrangement View">arrangement-view</Localized> - - - - - {}} - /> - - - - ) -}) diff --git a/spaces/yefengzi/vits-models/text/cleaners.py b/spaces/yefengzi/vits-models/text/cleaners.py deleted file mode 100644 index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000 --- a/spaces/yefengzi/vits-models/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' 
-Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of 
(bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i 0 - # To capture wide-context information in conditional features - kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1 - # NOTE(kan-bayashi): Here do not use padding because the input is already padded - self.conv_in = Conv1d(aux_channels, aux_channels, kernel_size=kernel_size, bias=False) - self.upsample = UpsampleNetwork( - upsample_scales=upsample_scales, - nonlinear_activation=nonlinear_activation, - nonlinear_activation_params=nonlinear_activation_params, - interpolate_mode=interpolate_mode, - freq_axis_kernel_size=freq_axis_kernel_size, - use_causal_conv=use_causal_conv, - ) - - def forward(self, c): - """Calculate forward propagation. - - Args: - c : Input tensor (B, C, T'). - - Returns: - Tensor: Upsampled tensor (B, C, T), - where T = (T' - aux_context_window * 2) * prod(upsample_scales). - - Note: - The length of inputs considers the context window size. 
- - """ - c_ = self.conv_in(c) - c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_ - return self.upsample(c) diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/inference/__init__.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/coco.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/coco.py deleted file mode 100644 index f8496aacf2eda691e55a8fabfc0f5db496dcc186..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/coco.py +++ /dev/null @@ -1,49 +0,0 @@ -import os - -from detectron2.data.datasets.register_coco import register_coco_instances -from detectron2.data.datasets.coco import load_coco_json -from detectron2.data.datasets.builtin_meta import _get_builtin_metadata -from detectron2.data import DatasetCatalog, MetadataCatalog - - -def register_distill_coco_instances(name, metadata, json_file, image_root): - """ - add extra_annotation_keys - """ - assert isinstance(name, str), name - assert isinstance(json_file, (str, os.PathLike)), json_file - assert isinstance(image_root, (str, os.PathLike)), image_root - # 1. register a function which returns dicts - DatasetCatalog.register(name, lambda: load_coco_json( - json_file, image_root, name, extra_annotation_keys=['score'])) - - # 2. Optionally, add metadata about this dataset, - # since they might be useful in evaluation, visualization or logging - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata - ) - - -_PREDEFINED_SPLITS_COCO = { - "coco_2017_unlabeled": ("coco/unlabeled2017", "coco/annotations/image_info_unlabeled2017.json"), -} - -for key, (image_root, json_file) in _PREDEFINED_SPLITS_COCO.items(): - register_coco_instances( - key, - _get_builtin_metadata('coco'), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) - -_PREDEFINED_SPLITS_DISTILL_COCO = { - "coco_un_yolov4_55_0.5": ("coco/unlabeled2017", "coco/annotations/yolov4_cocounlabeled_55_ann0.5.json"), -} - -for key, (image_root, json_file) in _PREDEFINED_SPLITS_DISTILL_COCO.items(): - register_distill_coco_instances( - key, - _get_builtin_metadata('coco'), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) \ No newline at end of file diff --git a/spaces/ynhe/AskAnything/transforms.py b/spaces/ynhe/AskAnything/transforms.py deleted file mode 100644 index ea7a1aeb20ced1961d69e908ba932e9cc809648c..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/transforms.py +++ /dev/null @@ -1,443 +0,0 @@ -import torchvision -import random -from PIL import Image, ImageOps -import numpy as np -import numbers -import math -import torch - - -class GroupRandomCrop(object): - def __init__(self, size): - if isinstance(size, numbers.Number): - self.size = (int(size), int(size)) - else: - self.size = size - - def __call__(self, img_group): - - w, h = img_group[0].size - th, tw = self.size - - out_images = list() - - x1 = random.randint(0, w - tw) - y1 = random.randint(0, h - th) - - for img in img_group: - assert(img.size[0] == w and img.size[1] == 
h) - if w == tw and h == th: - out_images.append(img) - else: - out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) - - return out_images - - -class MultiGroupRandomCrop(object): - def __init__(self, size, groups=1): - if isinstance(size, numbers.Number): - self.size = (int(size), int(size)) - else: - self.size = size - self.groups = groups - - def __call__(self, img_group): - - w, h = img_group[0].size - th, tw = self.size - - out_images = list() - - for i in range(self.groups): - x1 = random.randint(0, w - tw) - y1 = random.randint(0, h - th) - - for img in img_group: - assert(img.size[0] == w and img.size[1] == h) - if w == tw and h == th: - out_images.append(img) - else: - out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) - - return out_images - - -class GroupCenterCrop(object): - def __init__(self, size): - self.worker = torchvision.transforms.CenterCrop(size) - - def __call__(self, img_group): - return [self.worker(img) for img in img_group] - - -class GroupRandomHorizontalFlip(object): - """Randomly horizontally flips the given PIL.Image with a probability of 0.5 - """ - - def __init__(self, is_flow=False): - self.is_flow = is_flow - - def __call__(self, img_group, is_flow=False): - v = random.random() - if v < 0.5: - ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group] - if self.is_flow: - for i in range(0, len(ret), 2): - # invert flow pixel values when flipping - ret[i] = ImageOps.invert(ret[i]) - return ret - else: - return img_group - - -class GroupNormalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, tensor): - rep_mean = self.mean * (tensor.size()[0] // len(self.mean)) - rep_std = self.std * (tensor.size()[0] // len(self.std)) - - # TODO: make efficient - for t, m, s in zip(tensor, rep_mean, rep_std): - t.sub_(m).div_(s) - - return tensor - - -class GroupScale(object): - """ Rescales the input PIL.Image to the given 'size'. - 'size' will be the size of the smaller edge. 
- For example, if height > width, then image will be - rescaled to (size * height / width, size) - size: size of the smaller edge - interpolation: Default: PIL.Image.BILINEAR - """ - - def __init__(self, size, interpolation=Image.BILINEAR): - self.worker = torchvision.transforms.Resize(size, interpolation) - - def __call__(self, img_group): - return [self.worker(img) for img in img_group] - - -class GroupOverSample(object): - def __init__(self, crop_size, scale_size=None, flip=True): - self.crop_size = crop_size if not isinstance( - crop_size, int) else (crop_size, crop_size) - - if scale_size is not None: - self.scale_worker = GroupScale(scale_size) - else: - self.scale_worker = None - self.flip = flip - - def __call__(self, img_group): - - if self.scale_worker is not None: - img_group = self.scale_worker(img_group) - - image_w, image_h = img_group[0].size - crop_w, crop_h = self.crop_size - - offsets = GroupMultiScaleCrop.fill_fix_offset( - False, image_w, image_h, crop_w, crop_h) - oversample_group = list() - for o_w, o_h in offsets: - normal_group = list() - flip_group = list() - for i, img in enumerate(img_group): - crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) - normal_group.append(crop) - flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) - - if img.mode == 'L' and i % 2 == 0: - flip_group.append(ImageOps.invert(flip_crop)) - else: - flip_group.append(flip_crop) - - oversample_group.extend(normal_group) - if self.flip: - oversample_group.extend(flip_group) - return oversample_group - - -class GroupFullResSample(object): - def __init__(self, crop_size, scale_size=None, flip=True): - self.crop_size = crop_size if not isinstance( - crop_size, int) else (crop_size, crop_size) - - if scale_size is not None: - self.scale_worker = GroupScale(scale_size) - else: - self.scale_worker = None - self.flip = flip - - def __call__(self, img_group): - - if self.scale_worker is not None: - img_group = self.scale_worker(img_group) - - image_w, image_h = img_group[0].size - crop_w, crop_h = self.crop_size - - w_step = (image_w - crop_w) // 4 - h_step = (image_h - crop_h) // 4 - - offsets = list() - offsets.append((0 * w_step, 2 * h_step)) # left - offsets.append((4 * w_step, 2 * h_step)) # right - offsets.append((2 * w_step, 2 * h_step)) # center - - oversample_group = list() - for o_w, o_h in offsets: - normal_group = list() - flip_group = list() - for i, img in enumerate(img_group): - crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) - normal_group.append(crop) - if self.flip: - flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) - - if img.mode == 'L' and i % 2 == 0: - flip_group.append(ImageOps.invert(flip_crop)) - else: - flip_group.append(flip_crop) - - oversample_group.extend(normal_group) - oversample_group.extend(flip_group) - return oversample_group - - -class GroupMultiScaleCrop(object): - - def __init__(self, input_size, scales=None, max_distort=1, - fix_crop=True, more_fix_crop=True): - self.scales = scales if scales is not None else [1, .875, .75, .66] - self.max_distort = max_distort - self.fix_crop = fix_crop - self.more_fix_crop = more_fix_crop - self.input_size = input_size if not isinstance(input_size, int) else [ - input_size, input_size] - self.interpolation = Image.BILINEAR - - def __call__(self, img_group): - - im_size = img_group[0].size - - crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size) - crop_img_group = [ - img.crop( - (offset_w, - offset_h, - offset_w + - crop_w, - offset_h + - crop_h)) for img in img_group] - ret_img_group = 
[img.resize((self.input_size[0], self.input_size[1]), self.interpolation) - for img in crop_img_group] - return ret_img_group - - def _sample_crop_size(self, im_size): - image_w, image_h = im_size[0], im_size[1] - - # find a crop size - base_size = min(image_w, image_h) - crop_sizes = [int(base_size * x) for x in self.scales] - crop_h = [ - self.input_size[1] if abs( - x - self.input_size[1]) < 3 else x for x in crop_sizes] - crop_w = [ - self.input_size[0] if abs( - x - self.input_size[0]) < 3 else x for x in crop_sizes] - - pairs = [] - for i, h in enumerate(crop_h): - for j, w in enumerate(crop_w): - if abs(i - j) <= self.max_distort: - pairs.append((w, h)) - - crop_pair = random.choice(pairs) - if not self.fix_crop: - w_offset = random.randint(0, image_w - crop_pair[0]) - h_offset = random.randint(0, image_h - crop_pair[1]) - else: - w_offset, h_offset = self._sample_fix_offset( - image_w, image_h, crop_pair[0], crop_pair[1]) - - return crop_pair[0], crop_pair[1], w_offset, h_offset - - def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h): - offsets = self.fill_fix_offset( - self.more_fix_crop, image_w, image_h, crop_w, crop_h) - return random.choice(offsets) - - @staticmethod - def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h): - w_step = (image_w - crop_w) // 4 - h_step = (image_h - crop_h) // 4 - - ret = list() - ret.append((0, 0)) # upper left - ret.append((4 * w_step, 0)) # upper right - ret.append((0, 4 * h_step)) # lower left - ret.append((4 * w_step, 4 * h_step)) # lower right - ret.append((2 * w_step, 2 * h_step)) # center - - if more_fix_crop: - ret.append((0, 2 * h_step)) # center left - ret.append((4 * w_step, 2 * h_step)) # center right - ret.append((2 * w_step, 4 * h_step)) # lower center - ret.append((2 * w_step, 0 * h_step)) # upper center - - ret.append((1 * w_step, 1 * h_step)) # upper left quarter - ret.append((3 * w_step, 1 * h_step)) # upper right quarter - ret.append((1 * w_step, 3 * h_step)) # lower left quarter - ret.append((3 * w_step, 3 * h_step)) # lower righ quarter - - return ret - - -class GroupRandomSizedCrop(object): - """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size - and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio - This is popularly used to train the Inception networks - size: size of the smaller edge - interpolation: Default: PIL.Image.BILINEAR - """ - - def __init__(self, size, interpolation=Image.BILINEAR): - self.size = size - self.interpolation = interpolation - - def __call__(self, img_group): - for attempt in range(10): - area = img_group[0].size[0] * img_group[0].size[1] - target_area = random.uniform(0.08, 1.0) * area - aspect_ratio = random.uniform(3. / 4, 4. 
/ 3) - - w = int(round(math.sqrt(target_area * aspect_ratio))) - h = int(round(math.sqrt(target_area / aspect_ratio))) - - if random.random() < 0.5: - w, h = h, w - - if w <= img_group[0].size[0] and h <= img_group[0].size[1]: - x1 = random.randint(0, img_group[0].size[0] - w) - y1 = random.randint(0, img_group[0].size[1] - h) - found = True - break - else: - found = False - x1 = 0 - y1 = 0 - - if found: - out_group = list() - for img in img_group: - img = img.crop((x1, y1, x1 + w, y1 + h)) - assert(img.size == (w, h)) - out_group.append( - img.resize( - (self.size, self.size), self.interpolation)) - return out_group - else: - # Fallback - scale = GroupScale(self.size, interpolation=self.interpolation) - crop = GroupRandomCrop(self.size) - return crop(scale(img_group)) - - -class ConvertDataFormat(object): - def __init__(self, model_type): - self.model_type = model_type - - def __call__(self, images): - if self.model_type == '2D': - return images - tc, h, w = images.size() - t = tc // 3 - images = images.view(t, 3, h, w) - images = images.permute(1, 0, 2, 3) - return images - - -class Stack(object): - - def __init__(self, roll=False): - self.roll = roll - - def __call__(self, img_group): - if img_group[0].mode == 'L': - return np.concatenate([np.expand_dims(x, 2) - for x in img_group], axis=2) - elif img_group[0].mode == 'RGB': - if self.roll: - return np.concatenate([np.array(x)[:, :, ::-1] - for x in img_group], axis=2) - else: - #print(np.concatenate(img_group, axis=2).shape) - # print(img_group[0].shape) - return np.concatenate(img_group, axis=2) - - -class ToTorchFormatTensor(object): - """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] - to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """ - - def __init__(self, div=True): - self.div = div - - def __call__(self, pic): - if isinstance(pic, np.ndarray): - # handle numpy array - img = torch.from_numpy(pic).permute(2, 0, 1).contiguous() - else: - # handle PIL Image - img = torch.ByteTensor( - torch.ByteStorage.from_buffer( - pic.tobytes())) - img = img.view(pic.size[1], pic.size[0], len(pic.mode)) - # put it from HWC to CHW format - # yikes, this transpose takes 80% of the loading time/CPU - img = img.transpose(0, 1).transpose(0, 2).contiguous() - return img.float().div(255) if self.div else img.float() - - -class IdentityTransform(object): - - def __call__(self, data): - return data - - -if __name__ == "__main__": - trans = torchvision.transforms.Compose([ - GroupScale(256), - GroupRandomCrop(224), - Stack(), - ToTorchFormatTensor(), - GroupNormalize( - mean=[.485, .456, .406], - std=[.229, .224, .225] - )] - ) - - im = Image.open('../tensorflow-model-zoo.torch/lena_299.png') - - color_group = [im] * 3 - rst = trans(color_group) - - gray_group = [im.convert('L')] * 9 - gray_rst = trans(gray_group) - - trans2 = torchvision.transforms.Compose([ - GroupRandomSizedCrop(256), - Stack(), - ToTorchFormatTensor(), - GroupNormalize( - mean=[.485, .456, .406], - std=[.229, .224, .225]) - ]) - print(trans2(color_group)) \ No newline at end of file diff --git a/spaces/yo2266911/uma_voice/preprocess.py b/spaces/yo2266911/uma_voice/preprocess.py deleted file mode 100644 index d330e54728733dbc75594d239d789be42ecb4d01..0000000000000000000000000000000000000000 --- a/spaces/yo2266911/uma_voice/preprocess.py +++ /dev/null @@ -1,25 +0,0 @@ -import argparse -import text -from utils import load_filepaths_and_text - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - 
parser.add_argument("--out_extension", default="cleaned") - parser.add_argument("--text_index", default=2, type=int) - parser.add_argument("--filelists", nargs="+", default=["E:/uma_voice/output_train.txt", "E:/uma_voice/output_val.txt"]) - parser.add_argument("--text_cleaners", nargs="+", default=["japanese_cleaners"]) - - args = parser.parse_args() - - - for filelist in args.filelists: - print("START:", filelist) - filepaths_and_text = load_filepaths_and_text(filelist) - for i in range(len(filepaths_and_text)): - original_text = filepaths_and_text[i][args.text_index] - cleaned_text = text._clean_text(original_text, args.text_cleaners) - filepaths_and_text[i][args.text_index] = cleaned_text - - new_filelist = filelist + "." + args.out_extension - with open(new_filelist, "w", encoding="utf-8") as f: - f.writelines(["|".join(x) + "\n" for x in filepaths_and_text]) diff --git a/spaces/yuhangzang/ContextDet-Demo/csrc/MsDeformAttn/ms_deform_attn.h b/spaces/yuhangzang/ContextDet-Demo/csrc/MsDeformAttn/ms_deform_attn.h deleted file mode 100644 index c7408eba007b424194618baa63726657e36875e3..0000000000000000000000000000000000000000 --- a/spaces/yuhangzang/ContextDet-Demo/csrc/MsDeformAttn/ms_deform_attn.h +++ /dev/null @@ -1,64 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once - -#include "ms_deform_attn_cpu.h" - -#ifdef WITH_CUDA -#include "ms_deform_attn_cuda.h" -#endif - -namespace groundingdino { - -at::Tensor -ms_deform_attn_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_forward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -std::vector -ms_deform_attn_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_backward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -} // namespace groundingdino \ No newline at end of file diff --git a/spaces/zeno-ml/openai-evals/zeno-evals-hub/frontend/build/assets/main-55272372.css b/spaces/zeno-ml/openai-evals/zeno-evals-hub/frontend/build/assets/main-55272372.css deleted file mode 100644 index 7526da8134812d4d10effc416c5b656d4657c6d3..0000000000000000000000000000000000000000 --- a/spaces/zeno-ml/openai-evals/zeno-evals-hub/frontend/build/assets/main-55272372.css +++ /dev/null @@ -1 +0,0 @@ 
-iframe.svelte-195gryk.svelte-195gryk{margin-left:10px}#container.svelte-195gryk.svelte-195gryk{margin:50px 20px;display:flex;justify-content:center}#table-background.svelte-195gryk.svelte-195gryk{width:1100px;min-width:900px;padding:20px;border-radius:20px}.name-wrap.svelte-195gryk.svelte-195gryk{border:1px solid transparent;border-radius:10px;font-weight:500;color:var(--logo)}.name-wrap.svelte-195gryk.svelte-195gryk:hover{color:var(--P2)}table.svelte-195gryk.svelte-195gryk{border-collapse:collapse;text-align:left;cursor:default;margin-left:auto;margin-right:auto}table.svelte-195gryk thead tr th.svelte-195gryk{border-bottom:.5px solid grey}table.svelte-195gryk th.svelte-195gryk,table.svelte-195gryk td.svelte-195gryk{padding:4px 25px}table.svelte-195gryk td.svelte-195gryk:first-child,table.svelte-195gryk th.svelte-195gryk:first-child{border-radius:20px 0 0 20px}table.svelte-195gryk td.svelte-195gryk:last-child,table.svelte-195gryk th.svelte-195gryk:last-child{border-radius:0 20px 20px 0}tbody.svelte-195gryk.svelte-195gryk:before{content:"@";display:block;line-height:10px;text-indent:-99999px}thead.svelte-195gryk tr.svelte-195gryk{color:#213547}tbody.svelte-195gryk tr.svelte-195gryk{opacity:.9;height:70px}.open_ai.svelte-195gryk.svelte-195gryk{width:20px;margin-left:5px}.tagline.svelte-195gryk.svelte-195gryk{font-size:18px;text-align:center;display:flex;justify-content:center;align-items:center}.tagline.svelte-195gryk b.svelte-195gryk{margin-right:5px;margin-left:5px}header.svelte-195gryk.svelte-195gryk{display:flex;align-items:center;justify-content:center} diff --git a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/ranges/to-comparators.js b/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/ranges/to-comparators.js deleted file mode 100644 index 6c8bc7e6f15a408e3707195add970bf707817dd9..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/ranges/to-comparators.js +++ /dev/null @@ -1,8 +0,0 @@ -const Range = require('../classes/range') - -// Mostly just for testing and legacy API reasons -const toComparators = (range, options) => - new Range(range, options).set - .map(comp => comp.map(c => c.value).join(' ').trim().split(' ')) - -module.exports = toComparators diff --git a/spaces/zideliu/styledrop/timm/data/transforms_factory.py b/spaces/zideliu/styledrop/timm/data/transforms_factory.py deleted file mode 100644 index 01c9fcf238ec7f856053683348cc9edde1640370..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/data/transforms_factory.py +++ /dev/null @@ -1,236 +0,0 @@ -""" Transforms Factory -Factory methods for building image transforms for use with TIMM (PyTorch Image Models) - -Hacked together by / Copyright 2020 Ross Wightman -""" -import math - -import torch -from torchvision import transforms - -from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT -from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform -from timm.data.transforms import _pil_interp, RandomResizedCropAndInterpolation, ToNumpy, ToTensor -from timm.data.random_erasing import RandomErasing - - -def transforms_noaug_train( - img_size=224, - interpolation='bilinear', - use_prefetcher=False, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, -): - if interpolation == 'random': - # random interpolation not supported with no-aug - interpolation = 
'bilinear' - tfl = [ - transforms.Resize(img_size, _pil_interp(interpolation)), - transforms.CenterCrop(img_size) - ] - if use_prefetcher: - # prefetcher and collate will handle tensor conversion and norm - tfl += [ToNumpy()] - else: - tfl += [ - transforms.ToTensor(), - transforms.Normalize( - mean=torch.tensor(mean), - std=torch.tensor(std)) - ] - return transforms.Compose(tfl) - - -def transforms_imagenet_train( - img_size=224, - scale=None, - ratio=None, - hflip=0.5, - vflip=0., - color_jitter=0.4, - auto_augment=None, - interpolation='random', - use_prefetcher=False, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - re_prob=0., - re_mode='const', - re_count=1, - re_num_splits=0, - separate=False, -): - """ - If separate==True, the transforms are returned as a tuple of 3 separate transforms - for use in a mixing dataset that passes - * all data through the first (primary) transform, called the 'clean' data - * a portion of the data through the secondary transform - * normalizes and converts the branches above with the third, final transform - """ - scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range - ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range - primary_tfl = [ - RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)] - if hflip > 0.: - primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] - if vflip > 0.: - primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] - - secondary_tfl = [] - if auto_augment: - assert isinstance(auto_augment, str) - if isinstance(img_size, tuple): - img_size_min = min(img_size) - else: - img_size_min = img_size - aa_params = dict( - translate_const=int(img_size_min * 0.45), - img_mean=tuple([min(255, round(255 * x)) for x in mean]), - ) - if interpolation and interpolation != 'random': - aa_params['interpolation'] = _pil_interp(interpolation) - if auto_augment.startswith('rand'): - secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] - elif auto_augment.startswith('augmix'): - aa_params['translate_pct'] = 0.3 - secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] - else: - secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] - elif color_jitter is not None: - # color jitter is enabled when not using AA - if isinstance(color_jitter, (list, tuple)): - # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation - # or 4 if also augmenting hue - assert len(color_jitter) in (3, 4) - else: - # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue - color_jitter = (float(color_jitter),) * 3 - secondary_tfl += [transforms.ColorJitter(*color_jitter)] - - final_tfl = [] - if use_prefetcher: - # prefetcher and collate will handle tensor conversion and norm - final_tfl += [ToNumpy()] - else: - final_tfl += [ - transforms.ToTensor(), - transforms.Normalize( - mean=torch.tensor(mean), - std=torch.tensor(std)) - ] - if re_prob > 0.: - final_tfl.append( - RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')) - - if separate: - return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) - else: - return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) - - -def transforms_imagenet_eval( - img_size=224, - crop_pct=None, - interpolation='bilinear', - use_prefetcher=False, - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD): - crop_pct = crop_pct or DEFAULT_CROP_PCT - - if 
isinstance(img_size, tuple): - assert len(img_size) == 2 - if img_size[-1] == img_size[-2]: - # fall-back to older behaviour so Resize scales to shortest edge if target is square - scale_size = int(math.floor(img_size[0] / crop_pct)) - else: - scale_size = tuple([int(x / crop_pct) for x in img_size]) - else: - scale_size = int(math.floor(img_size / crop_pct)) - - tfl = [ - transforms.Resize(scale_size, _pil_interp(interpolation)), - transforms.CenterCrop(img_size), - ] - if use_prefetcher: - # prefetcher and collate will handle tensor conversion and norm - tfl += [ToNumpy()] - else: - tfl += [ - transforms.ToTensor(), - transforms.Normalize( - mean=torch.tensor(mean), - std=torch.tensor(std)) - ] - - return transforms.Compose(tfl) - - -def create_transform( - input_size, - is_training=False, - use_prefetcher=False, - no_aug=False, - scale=None, - ratio=None, - hflip=0.5, - vflip=0., - color_jitter=0.4, - auto_augment=None, - interpolation='bilinear', - mean=IMAGENET_DEFAULT_MEAN, - std=IMAGENET_DEFAULT_STD, - re_prob=0., - re_mode='const', - re_count=1, - re_num_splits=0, - crop_pct=None, - tf_preprocessing=False, - separate=False): - - if isinstance(input_size, tuple): - img_size = input_size[-2:] - else: - img_size = input_size - - if tf_preprocessing and use_prefetcher: - assert not separate, "Separate transforms not supported for TF preprocessing" - from timm.data.tf_preprocessing import TfPreprocessTransform - transform = TfPreprocessTransform( - is_training=is_training, size=img_size, interpolation=interpolation) - else: - if is_training and no_aug: - assert not separate, "Cannot perform split augmentation with no_aug" - transform = transforms_noaug_train( - img_size, - interpolation=interpolation, - use_prefetcher=use_prefetcher, - mean=mean, - std=std) - elif is_training: - transform = transforms_imagenet_train( - img_size, - scale=scale, - ratio=ratio, - hflip=hflip, - vflip=vflip, - color_jitter=color_jitter, - auto_augment=auto_augment, - interpolation=interpolation, - use_prefetcher=use_prefetcher, - mean=mean, - std=std, - re_prob=re_prob, - re_mode=re_mode, - re_count=re_count, - re_num_splits=re_num_splits, - separate=separate) - else: - assert not separate, "Separate transforms not supported for validation preprocessing" - transform = transforms_imagenet_eval( - img_size, - interpolation=interpolation, - use_prefetcher=use_prefetcher, - mean=mean, - std=std, - crop_pct=crop_pct) - - return transform diff --git a/spaces/zlc99/M4Singer/modules/commons/ssim.py b/spaces/zlc99/M4Singer/modules/commons/ssim.py deleted file mode 100644 index 0d0241f267ef58b24979e022b05f2a9adf768826..0000000000000000000000000000000000000000 --- a/spaces/zlc99/M4Singer/modules/commons/ssim.py +++ /dev/null @@ -1,391 +0,0 @@ -# ''' -# https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py -# ''' -# -# import torch -# import torch.jit -# import torch.nn.functional as F -# -# -# @torch.jit.script -# def create_window(window_size: int, sigma: float, channel: int): -# ''' -# Create 1-D gauss kernel -# :param window_size: the size of gauss kernel -# :param sigma: sigma of normal distribution -# :param channel: input channel -# :return: 1D kernel -# ''' -# coords = torch.arange(window_size, dtype=torch.float) -# coords -= window_size // 2 -# -# g = torch.exp(-(coords ** 2) / (2 * sigma ** 2)) -# g /= g.sum() -# -# g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1) -# return g -# -# -# @torch.jit.script -# def _gaussian_filter(x, window_1d, use_padding: bool): -# ''' -# Blur input with 1-D kernel 
-# :param x: batch of tensors to be blured -# :param window_1d: 1-D gauss kernel -# :param use_padding: padding image before conv -# :return: blured tensors -# ''' -# C = x.shape[1] -# padding = 0 -# if use_padding: -# window_size = window_1d.shape[3] -# padding = window_size // 2 -# out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C) -# out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C) -# return out -# -# -# @torch.jit.script -# def ssim(X, Y, window, data_range: float, use_padding: bool = False): -# ''' -# Calculate ssim index for X and Y -# :param X: images [B, C, H, N_bins] -# :param Y: images [B, C, H, N_bins] -# :param window: 1-D gauss kernel -# :param data_range: value range of input images. (usually 1.0 or 255) -# :param use_padding: padding image before conv -# :return: -# ''' -# -# K1 = 0.01 -# K2 = 0.03 -# compensation = 1.0 -# -# C1 = (K1 * data_range) ** 2 -# C2 = (K2 * data_range) ** 2 -# -# mu1 = _gaussian_filter(X, window, use_padding) -# mu2 = _gaussian_filter(Y, window, use_padding) -# sigma1_sq = _gaussian_filter(X * X, window, use_padding) -# sigma2_sq = _gaussian_filter(Y * Y, window, use_padding) -# sigma12 = _gaussian_filter(X * Y, window, use_padding) -# -# mu1_sq = mu1.pow(2) -# mu2_sq = mu2.pow(2) -# mu1_mu2 = mu1 * mu2 -# -# sigma1_sq = compensation * (sigma1_sq - mu1_sq) -# sigma2_sq = compensation * (sigma2_sq - mu2_sq) -# sigma12 = compensation * (sigma12 - mu1_mu2) -# -# cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2) -# # Fixed the issue that the negative value of cs_map caused ms_ssim to output Nan. -# cs_map = cs_map.clamp_min(0.) -# ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map -# -# ssim_val = ssim_map.mean(dim=(1, 2, 3)) # reduce along CHW -# cs = cs_map.mean(dim=(1, 2, 3)) -# -# return ssim_val, cs -# -# -# @torch.jit.script -# def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8): -# ''' -# interface of ms-ssim -# :param X: a batch of images, (N,C,H,W) -# :param Y: a batch of images, (N,C,H,W) -# :param window: 1-D gauss kernel -# :param data_range: value range of input images. (usually 1.0 or 255) -# :param weights: weights for different levels -# :param use_padding: padding image before conv -# :param eps: use for avoid grad nan. -# :return: -# ''' -# levels = weights.shape[0] -# cs_vals = [] -# ssim_vals = [] -# for _ in range(levels): -# ssim_val, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding) -# # Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf. -# ssim_val = ssim_val.clamp_min(eps) -# cs = cs.clamp_min(eps) -# cs_vals.append(cs) -# -# ssim_vals.append(ssim_val) -# padding = (X.shape[2] % 2, X.shape[3] % 2) -# X = F.avg_pool2d(X, kernel_size=2, stride=2, padding=padding) -# Y = F.avg_pool2d(Y, kernel_size=2, stride=2, padding=padding) -# -# cs_vals = torch.stack(cs_vals, dim=0) -# ms_ssim_val = torch.prod((cs_vals[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_vals[-1] ** weights[-1]), dim=0) -# return ms_ssim_val -# -# -# class SSIM(torch.jit.ScriptModule): -# __constants__ = ['data_range', 'use_padding'] -# -# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False): -# ''' -# :param window_size: the size of gauss kernel -# :param window_sigma: sigma of normal distribution -# :param data_range: value range of input images. 
(usually 1.0 or 255) -# :param channel: input channels (default: 3) -# :param use_padding: padding image before conv -# ''' -# super().__init__() -# assert window_size % 2 == 1, 'Window size must be odd.' -# window = create_window(window_size, window_sigma, channel) -# self.register_buffer('window', window) -# self.data_range = data_range -# self.use_padding = use_padding -# -# @torch.jit.script_method -# def forward(self, X, Y): -# r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding) -# return r[0] -# -# -# class MS_SSIM(torch.jit.ScriptModule): -# __constants__ = ['data_range', 'use_padding', 'eps'] -# -# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None, -# levels=None, eps=1e-8): -# ''' -# class for ms-ssim -# :param window_size: the size of gauss kernel -# :param window_sigma: sigma of normal distribution -# :param data_range: value range of input images. (usually 1.0 or 255) -# :param channel: input channels -# :param use_padding: padding image before conv -# :param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]) -# :param levels: number of downsampling -# :param eps: Use for fix a issue. When c = a ** b and a is 0, c.backward() will cause the a.grad become inf. -# ''' -# super().__init__() -# assert window_size % 2 == 1, 'Window size must be odd.' -# self.data_range = data_range -# self.use_padding = use_padding -# self.eps = eps -# -# window = create_window(window_size, window_sigma, channel) -# self.register_buffer('window', window) -# -# if weights is None: -# weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333] -# weights = torch.tensor(weights, dtype=torch.float) -# -# if levels is not None: -# weights = weights[:levels] -# weights = weights / weights.sum() -# -# self.register_buffer('weights', weights) -# -# @torch.jit.script_method -# def forward(self, X, Y): -# return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights, -# use_padding=self.use_padding, eps=self.eps) -# -# -# if __name__ == '__main__': -# print('Simple Test') -# im = torch.randint(0, 255, (5, 3, 256, 256), dtype=torch.float, device='cuda') -# img1 = im / 255 -# img2 = img1 * 0.5 -# -# losser = SSIM(data_range=1.).cuda() -# loss = losser(img1, img2).mean() -# -# losser2 = MS_SSIM(data_range=1.).cuda() -# loss2 = losser2(img1, img2).mean() -# -# print(loss.item()) -# print(loss2.item()) -# -# if __name__ == '__main__': -# print('Training Test') -# import cv2 -# import torch.optim -# import numpy as np -# import imageio -# import time -# -# out_test_video = False -# # 最好不要直接输出gif图,会非常大,最好先输出mkv文件后用ffmpeg转换到GIF -# video_use_gif = False -# -# im = cv2.imread('test_img1.jpg', 1) -# t_im = torch.from_numpy(im).cuda().permute(2, 0, 1).float()[None] / 255. -# -# if out_test_video: -# if video_use_gif: -# fps = 0.5 -# out_wh = (im.shape[1] // 2, im.shape[0] // 2) -# suffix = '.gif' -# else: -# fps = 5 -# out_wh = (im.shape[1], im.shape[0]) -# suffix = '.mkv' -# video_last_time = time.perf_counter() -# video = imageio.get_writer('ssim_test' + suffix, fps=fps) -# -# # 测试ssim -# print('Training SSIM') -# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255. 
-# rand_im.requires_grad = True -# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8) -# losser = SSIM(data_range=1., channel=t_im.shape[1]).cuda() -# ssim_score = 0 -# while ssim_score < 0.999: -# optim.zero_grad() -# loss = losser(rand_im, t_im) -# (-loss).sum().backward() -# ssim_score = loss.item() -# optim.step() -# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0] -# r_im = cv2.putText(r_im, 'ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2) -# -# if out_test_video: -# if time.perf_counter() - video_last_time > 1. / fps: -# video_last_time = time.perf_counter() -# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB) -# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA) -# if isinstance(out_frame, cv2.UMat): -# out_frame = out_frame.get() -# video.append_data(out_frame) -# -# cv2.imshow('ssim', r_im) -# cv2.setWindowTitle('ssim', 'ssim %f' % ssim_score) -# cv2.waitKey(1) -# -# if out_test_video: -# video.close() -# -# # 测试ms_ssim -# if out_test_video: -# if video_use_gif: -# fps = 0.5 -# out_wh = (im.shape[1] // 2, im.shape[0] // 2) -# suffix = '.gif' -# else: -# fps = 5 -# out_wh = (im.shape[1], im.shape[0]) -# suffix = '.mkv' -# video_last_time = time.perf_counter() -# video = imageio.get_writer('ms_ssim_test' + suffix, fps=fps) -# -# print('Training MS_SSIM') -# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255. -# rand_im.requires_grad = True -# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8) -# losser = MS_SSIM(data_range=1., channel=t_im.shape[1]).cuda() -# ssim_score = 0 -# while ssim_score < 0.999: -# optim.zero_grad() -# loss = losser(rand_im, t_im) -# (-loss).sum().backward() -# ssim_score = loss.item() -# optim.step() -# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0] -# r_im = cv2.putText(r_im, 'ms_ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2) -# -# if out_test_video: -# if time.perf_counter() - video_last_time > 1. 
/ fps: -# video_last_time = time.perf_counter() -# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB) -# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA) -# if isinstance(out_frame, cv2.UMat): -# out_frame = out_frame.get() -# video.append_data(out_frame) -# -# cv2.imshow('ms_ssim', r_im) -# cv2.setWindowTitle('ms_ssim', 'ms_ssim %f' % ssim_score) -# cv2.waitKey(1) -# -# if out_test_video: -# video.close() - -""" -Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim -""" - -import torch -import torch.nn.functional as F -from torch.autograd import Variable -import numpy as np -from math import exp - - -def gaussian(window_size, sigma): - gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]) - return gauss / gauss.sum() - - -def create_window(window_size, channel): - _1D_window = gaussian(window_size, 1.5).unsqueeze(1) - _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) - window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) - return window - - -def _ssim(img1, img2, window, window_size, channel, size_average=True): - mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) - mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) - - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) - mu1_mu2 = mu1 * mu2 - - sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq - sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq - sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 - - C1 = 0.01 ** 2 - C2 = 0.03 ** 2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) - - if size_average: - return ssim_map.mean() - else: - return ssim_map.mean(1) - - -class SSIM(torch.nn.Module): - def __init__(self, window_size=11, size_average=True): - super(SSIM, self).__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = 1 - self.window = create_window(window_size, self.channel) - - def forward(self, img1, img2): - (_, channel, _, _) = img1.size() - - if channel == self.channel and self.window.data.type() == img1.data.type(): - window = self.window - else: - window = create_window(self.window_size, channel) - - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - - self.window = window - self.channel = channel - - return _ssim(img1, img2, window, self.window_size, channel, self.size_average) - - -window = None - - -def ssim(img1, img2, window_size=11, size_average=True): - (_, channel, _, _) = img1.size() - global window - if window is None: - window = create_window(window_size, channel) - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - return _ssim(img1, img2, window, window_size, channel, size_average) diff --git a/spaces/znskiss/Qwen-VL/touchstone/README.md b/spaces/znskiss/Qwen-VL/touchstone/README.md deleted file mode 100644 index 65f0f70697196b7b245bf9181bf82aa61a73819b..0000000000000000000000000000000000000000 --- a/spaces/znskiss/Qwen-VL/touchstone/README.md +++ /dev/null @@ -1,69 +0,0 @@ -
- 中文  |  English
-**TOUCHSTONE** is a comprehensive assessment of multimodal language models, encompassing not only basic recognition and comprehension but also extending to literary creation. By automating the evaluation process and converting multimodal information into text, TouchStone enables efficient and accurate assessment of dialogue quality, leveraging the power of advanced language models without the need for manual intervention.
-
-## DATASET
-
-To evaluate the abilities of LVLMs, we construct a diverse and comprehensive dataset that covers five key dimensions: basic descriptive ability, visual recognition ability, visual comprehension ability, visual storytelling ability, and multi-image analysis ability.
-
-- **Basic Descriptive Ability** Image description measures a model's ability to describe the information contained in an image, covering both simple and detailed descriptions. Simple descriptions are typically short phrases that capture the main subject and action of the image, while detailed descriptions give more in-depth information about the scene, the objects in it, their attributes, and their relationships.
-
-- **Visual Recognition Ability** Image recognition is the task of recognizing objects or scenes within an image and inferring relevant information. It is further divided into several sub-tasks, including attribute QA, movie/TV recognition, art recognition, landmark recognition, celebrity recognition, emotion recognition, text recognition, object recognition, and structure content recognition.
-
-- **Visual Comprehension Ability** Image understanding measures a model's ability to grasp the meaning of an image and solve the associated tasks. It encompasses sub-tasks such as style appreciation, abstract image understanding, meme understanding, image analysis, chart analysis, general problem-solving, and reasoning QA.
-
-- **Visual Storytelling Ability** Visual storytelling is literary creation based on visual content, including writing emails, poetry, stories, ads/commodity recommendations, and brainstorming.
-
-- **Multi-Image Analysis Ability** Multi-image analysis is the task of analyzing and comparing multiple images. It includes comparing two or more images, summarizing information across multiple images, comparing commodities, and step-by-step analysis of images.

-[Figure: an example for each of the 27 subtasks across the five dimensions]
-We comprehensively evaluate a model's abilities along these five dimensions. The figure above gives an example for each of the 27 subtasks. From perception to cognition to creativity, the demands on models grow as the difficulty increases, and current LVLM capabilities are still at an early stage. Our dataset contains 800+ questions spanning 27 categories.
-
-## Methods
-
-We use a powerful LLM as a judge to enable automated evaluation. So that the judge can comprehend the contents of an image, we manually substitute the actual image input with fine-grained textual annotations. Feeding these annotations and the corresponding questions to a strong LLM such as GPT-4 yields reference answers.
-
-To evaluate the LVLMs, we provide the actual images and questions as input and collect their answers. We then ask GPT-4 to score the generated answers against the fine-grained annotations and questions. The scoring instructions require the judge to assess the usefulness, relevance, and accuracy of each answer, treating the annotations as the content of the images. To keep the evaluation fair, every model's answer is compared against the same GPT-4 reference answer, and a model's average score over all questions is taken as its final score.
-
-To eliminate the influence of answer position, we run a second scoring round with the positions of the two answers swapped and average the scores from both rounds. This mitigates any bias introduced by the placement of the answers; a minimal sketch of this position-debiased judging loop is given below.
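As a reading aid, here is a minimal sketch of the position-debiased judging described above. It is not the TouchStone toolkit's code: the `judge` callable (assumed to return one score for each of the two answers in a prompt), `build_prompt`, `debiased_score`, and `final_score` are hypothetical names introduced purely for illustration, and the real prompts and scoring rubric are more detailed.

```python
# Illustrative sketch only; names and prompt format are assumptions, not TouchStone's API.
from typing import Callable, List, Tuple


def build_prompt(annotation: str, question: str, answer_a: str, answer_b: str) -> str:
    """Compose a judging prompt that treats the textual annotation as the image content."""
    return (
        "Image content (fine-grained annotation):\n" + annotation + "\n\n"
        "Question: " + question + "\n\n"
        "Answer A: " + answer_a + "\n"
        "Answer B: " + answer_b + "\n\n"
        "Rate each answer from 1 to 10 for usefulness, relevance, and accuracy."
    )


def debiased_score(
    judge: Callable[[str], Tuple[float, float]],  # assumed: returns (score for Answer A, score for Answer B)
    annotation: str,
    question: str,
    model_answer: str,
    reference_answer: str,
) -> float:
    """Score the model answer twice with the answer order swapped, then average."""
    # Round 1: model answer in position A, reference answer in position B.
    round1_model_score, _ = judge(build_prompt(annotation, question, model_answer, reference_answer))
    # Round 2: positions swapped, so the model answer is now graded as Answer B.
    _, round2_model_score = judge(build_prompt(annotation, question, reference_answer, model_answer))
    return (round1_model_score + round2_model_score) / 2.0


def final_score(per_question_scores: List[float]) -> float:
    """A model's final score is its average score over all questions."""
    return sum(per_question_scores) / len(per_question_scores)
```

Averaging the two rounds cancels any systematic preference the judge model has for whichever answer appears first, which is exactly the positional bias the swap is meant to remove.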

-### Evaluation
-
-#### Evaluation in English-based Multimodal Dialogue
-
-| Model | Score |
-|---------------|-------|
-| PandaGPT | 488.5 |
-| MiniGPT4 | 531.7 |
-| InstructBLIP | 552.4 |
-| LLaMA-AdapterV2 | 590.1 |
-| mPLUG-Owl | 605.4 |
-| LLaVA | 602.7 |
-| Qwen-VL-Chat | 645.2 |
-
-#### Evaluation in Chinese-based Multimodal Dialogue
-
-| Model | Score |
-|---------------|-------|
-| VisualGLM | 247.1 |
-| Qwen-VL-Chat | 401.2 |