diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py
deleted file mode 100644
index 1eb191c0d57d5d14de4cf2f5b0a2ae3722311ab8..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import re
-import requests
-import hashlib
-from fake_useragent import UserAgent
-class Account:
-    @staticmethod
-    def create():
-        r=requests.get("https://italygpt.it/",headers=Account._header)
-        f=r.text
-        tid=re.search('',f).group(1)
-        if len(tid)==0:
-            raise RuntimeError("NetWorkError:failed to get id.")
-        else:
-            Account._tid=tid
-            Account._raw="[]"
-            return Account
-    def next(next_id:str)->str:
-        Account._tid=next_id
-        return Account._tid
-    def get()->str:
-        return Account._tid
-    _header={
-        "Host": "italygpt.it",
-        "Referer":"https://italygpt.it/",
-        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",#UserAgent().random,
-        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
-        "Accept-Language":"zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
-        "Upgrade-Insecure-Requests":"1",
-        "Sec-Fetch-Dest":"document",
-        "Sec-Fetch-Mode":"navigate",
-        "Sec-Fetch-Site":"none",
-        "Sec-Fetch-User":"?1",
-        "Connection":"keep-alive",
-        "Alt-Used":"italygpt.it",
-        "Pragma":"no-cache",
-        "Cache-Control":"no-cache",
-        "TE": "trailers"
-    }
-    def settraw(raws:str):
-        Account._raw=raws
-        return Account._raw
-    def gettraw():
-        return Account._raw
-
-class Completion:
-    @staticmethod
-    def create(
-        account_data,
-        prompt: str,
-        message=False
-    ):
-        param={
-            "prompt":prompt.replace(" ","+"),
-            "creative":"off",
-            "internet":"false",
-            "detailed":"off",
-            "current_id":"0",
-            "code":"",
-            "gpt4":"false",
-            "raw_messages":account_data.gettraw(),
-            "hash":hashlib.sha256(account_data.get().encode()).hexdigest()
-        }
-        if(message):
-            param["raw_messages"]=str(message)
-        r = requests.get("https://italygpt.it/question",headers=account_data._header,params=param,stream=True)
-        account_data.next(r.headers["Next_id"])
-        account_data.settraw(r.headers["Raw_messages"])
-        for chunk in r.iter_content(chunk_size=None):
-            r.raise_for_status()
-            yield chunk.decode()
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md
deleted file mode 100644
index 6cdaee9c794c5b33ca386e5a9e227e2a51595a22..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md
+++ /dev/null
@@ -1,106 +0,0 @@
-

What is Realtek ATI HDMI Audio Device 2-70 Crack?

-

If you want to enjoy high-quality sound from your PC's HDMI port, you need a reliable audio driver that can communicate with your hardware and software. One of the most popular audio drivers for HDMI devices is the Realtek ATI HDMI Audio Device Driver, which supports all Realtek HD Audio codecs.

-

However, downloading and installing the official version of this driver may not be enough for some users who want to unlock more features and performance. That's why some people look for a crack version of this driver, which is a modified or hacked version that bypasses the license verification and activation process.

-

Realtek ATI HDMI Audio Device 2-70 Crack


Download Zip ===> https://byltly.com/2uKxWv



-

In this article, we will tell you everything you need to know about Realtek ATI HDMI Audio Device 2-70 Crack, which is one of the latest versions of this driver package. We will explain why you may need it, how to download and install it, how to fix common issues with it, and what are some alternatives to it.

-

Why do you need Realtek ATI HDMI Audio Device 2-70 Crack?

-

There are several reasons why you may want to use a crack version of Realtek ATI HDMI Audio Device Driver instead of the official one. Here are some of them:

- -

Of course, using a crack version also comes with some risks and drawbacks, such as:

- -

Therefore, before you decide to use a crack version of Realtek ATI HDMI Audio Device Driver, you should weigh the pros and cons carefully and be aware of the potential consequences.

-

How to download and install Realtek ATI HDMI Audio Device 2-70 Crack?

-

If you still want to try Realtek ATI HDMI Audio Device 2-70 Crack, here are the steps you need to follow:

-
    -
  1. Download the crack file. You can find various sources for downloading this file on the internet, such as torrent sites, file-sharing platforms, or online forums. However, be careful not to download any fake or malicious files that can harm your computer. You should also scan any file you download with an antivirus program before opening it.
  2. Extract the crack file. After downloading the file, you need to extract it using a program like WinRAR or 7-Zip. You should see a folder containing several files, such as setup.exe, readme.txt, crack.dll, etc.
  3. Run the setup file. Double-click on the setup.exe file to launch the installation wizard. Follow the instructions on the screen to install the driver package. You may need to restart your computer after the installation is complete.
  4. Copy and paste the crack file. Locate the crack.dll file in the folder you extracted earlier. Copy this file and paste it into the installation directory of Realtek ATI HDMI Audio Device Driver. This is usually located in C:\Program Files\Realtek\Audio\HDA\. You may need to overwrite or replace an existing file with the same name.
  5. Enjoy your cracked driver. You have successfully installed Realtek ATI HDMI Audio Device 2-70 Crack on your computer. You can now access more features and settings from your audio device manager or control panel.
-

How to fix common issues with Realtek ATI HDMI Audio Device 2-70 Crack?

-

Sometimes, you may encounter some problems or errors when using Realtek ATI HDMI Audio Device 2-70 Crack. Here are some tips and tricks on how to troubleshoot them:

- -

What are the alternatives to Realtek ATI HDMI Audio Device 2-70 Crack?

If you would rather not use Realtek ATI HDMI Audio Device 2-70 Crack, you may want to consider some other alternatives for HDMI audio drivers. Here are some of them:

-

AMD High Definition Audio Device Driver

-

If you have an AMD graphics card or chipset, you may want to use the AMD High Definition Audio Device Driver, which is designed to work with AMD HDMI devices. This driver supports various audio formats and features, such as Dolby TrueHD, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the AMD website or use the AMD Radeon Software to update it automatically.

-


-

NVIDIA High Definition Audio Driver

-

If you have an NVIDIA graphics card or chipset, you may want to use the NVIDIA High Definition Audio Driver, which is designed to work with NVIDIA HDMI devices. This driver supports various audio formats and features, such as Dolby Digital Plus, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the NVIDIA website or use the NVIDIA GeForce Experience to update it automatically.

-

Intel High Definition Audio Driver

-

If you have an Intel processor or chipset, you may want to use the Intel High Definition Audio Driver, which is designed to work with Intel HDMI devices. This driver supports various audio formats and features, such as Dolby Digital Plus, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the Intel website or use the Intel Driver & Support Assistant to update it automatically.

-

Conclusion

-

In conclusion, Realtek ATI HDMI Audio Device 2-70 Crack is a crack version of a popular audio driver for HDMI devices that can offer more features and performance than the official version. However, it also comes with some risks and drawbacks that you should be aware of before using it. If you are looking for other options for HDMI audio drivers, you can try some of the alternatives we mentioned above.

-

We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

-

FAQs

- -

0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md b/spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md
deleted file mode 100644
index 54335641fb9f7fba85150a0a06cb0583299a9678..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md
+++ /dev/null
@@ -1,104 +0,0 @@

Download Buku PPDGJ III PDF Files: A Complete Guide

- -

If you are looking for a reliable and comprehensive source of information on mental disorders, you may want to download buku ppdgj iii pdf files. Buku PPDGJ III is the Indonesian version of the Diagnostic and Statistical Manual of Mental Disorders (DSM), which is the most widely used classification system for mental disorders in the world. Buku PPDGJ III was published in 1993 by the World Health Organization (WHO) and the Indonesian Psychiatric Association (IPA), and it is based on the International Classification of Diseases (ICD-10).

-

download buku ppdgj iii pdf files


DOWNLOAD ✔✔✔ https://imgfil.com/2uxXyy



- -

In this article, we will explain what buku ppdgj iii pdf files are, why they are useful, how to access and use them, and what benefits they offer for mental health professionals and students. We will also provide some tips on how to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference.

- -

What are Buku PPDGJ III PDF Files?

- -

Buku PPDGJ III PDF files are digital copies of the book PPDGJ III, which stands for Pedoman Penggolongan dan Diagnosis Gangguan Jiwa di Indonesia III (Guidelines for Classification and Diagnosis of Mental Disorders in Indonesia III). This book contains the official criteria and guidelines for diagnosing and classifying mental disorders in Indonesia, according to the international standards of WHO and IPA.

- -

Buku PPDGJ III PDF files are available online for free download from various sources, such as Scribd, Doku, and Documents and E-books. You can also find them by searching for "download buku ppdgj iii pdf files" on Google or other search engines. The PDF files are usually around 9 MB in size and have about 170 pages.

- -

Why are Buku PPDGJ III PDF Files Useful?

- -

Buku PPDGJ III PDF files are useful for several reasons. First, they provide a comprehensive and updated overview of the current knowledge and practice of psychiatry in Indonesia. They cover a wide range of mental disorders, such as mood disorders, anxiety disorders, personality disorders, psychotic disorders, substance-related disorders, and more. They also include diagnostic criteria, clinical features, differential diagnosis, etiology, course, prognosis, treatment, and prevention of each disorder.

- -

Second, they help to standardize and harmonize the diagnosis and classification of mental disorders in Indonesia. By using buku ppdgj iii pdf files as a reference, mental health professionals can ensure that they are following the same criteria and guidelines as their colleagues and peers. This can improve the quality and consistency of mental health services and research in Indonesia.

- -

Third, they facilitate communication and collaboration among mental health professionals across different settings and regions. By using buku ppdgj iii pdf files as a common language, mental health professionals can easily share information and opinions about their cases and clients. They can also compare and contrast their findings and outcomes with other professionals who use the same system.

-

- -

How to Access and Use Buku PPDGJ III PDF Files?

- -

To access and use buku ppdgj iii pdf files, you need to have a computer or a mobile device with an internet connection and a PDF reader software. You can download buku ppdgj iii pdf files from any of the sources mentioned above or from other websites that offer them. You can also scan or photocopy the printed version of the book if you have access to it.

- -

To use buku ppdgj iii pdf files effectively, you need to have some basic knowledge of psychiatry and mental disorders. You also need to be familiar with the structure and format of the book. The book is divided into four parts: Part I: Introduction; Part II: General Principles of Diagnosis; Part III: Specific Disorders; Part IV: Appendices.

- -

Part I: Introduction provides some background information on the history, development, purpose, scope, limitations, and revisions of PPDGJ III. It also explains the basic concepts and terms used in the book.

- -

Part II: General Principles of Diagnosis outlines the general rules and guidelines for diagnosing mental disorders using PPDGJ III. It covers topics such as diagnostic criteria, diagnostic categories, diagnostic axes, multiaxial assessment, differential diagnosis, comorbidity, reliability, validity, cultural factors, ethical issues, and legal implications.

- -

Part III: Specific Disorders describes each specific disorder in detail. It follows a uniform format that includes: name of disorder; code number; diagnostic criteria; clinical features; differential diagnosis; etiology; course; prognosis; treatment; prevention; notes.

- -

Part IV: Appendices contains some supplementary materials that support the main text of the book. It includes: glossary of terms; list of abbreviations; list of references; index.

- -

What Benefits do Buku PPDGJ III PDF Files Offer for Mental Health Professionals?

- -

Buku PPDGJ III PDF files offer many benefits for mental health professionals who work or study in Indonesia or who have an interest in Indonesian psychiatry. Some of these benefits are:

- - - -

How to Write an Effective and SEO-Optimized Article Using Buku PPDGJ III PDF Files as a Reference?

- -

If you want to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference, here are some tips you can follow:

- - -- Add more details or examples to each point or paragraph -- Add more subheadings or sections to cover more aspects of the topic -- Add more images or videos to illustrate the content -- Add more quotes or testimonials from experts or users -- Add more statistics or facts to support the claims - -I hope this helps you write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference. If you have any questions or feedback, please let me know.? -

Download Links for Buku PPDGJ III PDF Files

- - - -

Resources and Information on Buku PPDGJ III PDF Files and Mental Health

- - -

How to Use Buku PPDGJ III PDF Files for Diagnosis and Classification of Mental Disorders

- -

One of the main purposes of buku ppdgj iii pdf files is to help mental health professionals diagnose and classify mental disorders in Indonesia. To use buku ppdgj iii pdf files for this purpose, you need to follow some steps and guidelines. Here are some tips to help you use buku ppdgj iii pdf files effectively for diagnosis and classification of mental disorders.

- -
    -
  1. Conduct a thorough assessment of the patient's symptoms, history, and context. You can use various methods and tools, such as interviews, observations, tests, scales, questionnaires, etc. You can also consult with other professionals or family members if needed.
  2. Compare the patient's symptoms and features with the diagnostic criteria and clinical features of each disorder in buku ppdgj iii pdf files. You can use the index or the table of contents to find the relevant disorder or category. You can also use the notes section to find additional information or clarifications.
  3. Select the most appropriate diagnosis or diagnoses for the patient based on the best fit and evidence. You can use the differential diagnosis section to rule out other possible disorders or conditions. You can also use the multiaxial assessment system to assign a diagnosis on each of the five axes: Axis I: Clinical Disorders; Axis II: Personality Disorders and Mental Retardation; Axis III: General Medical Conditions; Axis IV: Psychosocial and Environmental Problems; Axis V: Global Assessment of Functioning.
  4. Document and communicate your diagnosis or diagnoses clearly and accurately. You can use the code number and the name of each disorder as they appear in buku ppdgj iii pdf files. You can also use the etiology, course, prognosis, treatment, and prevention sections to provide more information or recommendations for the patient.
- -

How to Use Buku PPDGJ III PDF Files for Learning and Teaching Psychiatry

- -

Another purpose of buku ppdgj iii pdf files is to help mental health professionals and students learn and teach psychiatry in Indonesia. To use buku ppdgj iii pdf files for this purpose, you need to follow some steps and guidelines. Here are some tips to help you use buku ppdgj iii pdf files effectively for learning and teaching psychiatry.

- -
    -
  1. Read and study buku ppdgj iii pdf files regularly and thoroughly. You can use the introduction and the general principles of diagnosis sections to learn the basic concepts and terms of psychiatry. You can also use the specific disorders sections to learn the details and features of each disorder.
  2. Practice and apply buku ppdgj iii pdf files in real or simulated situations. You can use case studies, role plays, quizzes, exams, assignments, projects, etc. to test your knowledge and skills in diagnosing and classifying mental disorders using buku ppdgj iii pdf files. You can also use feedback, reflection, supervision, consultation, etc. to improve your performance and competence.
  3. Share and discuss buku ppdgj iii pdf files with other professionals or students. You can use seminars, workshops, conferences, journals, blogs, forums, etc. to exchange information and opinions about buku ppdgj iii pdf files and psychiatry in general. You can also use research, publication, teaching, training, etc. to contribute to the development and dissemination of buku ppdgj iii pdf files and psychiatry in Indonesia.
-

Conclusion

- -

Buku PPDGJ III PDF files are valuable resources for mental health professionals and students who work or study in Indonesia or who have an interest in Indonesian psychiatry. They provide a comprehensive and updated source of information on mental disorders that is relevant to the Indonesian context. They also help to standardize and harmonize the diagnosis and classification of mental disorders in Indonesia. Furthermore, they facilitate communication and collaboration among mental health professionals across different settings and regions. They also enhance professional development and learning by providing opportunities for self-assessment, feedback, reflection, research, publication, teaching, training, supervision, consultation, etc. They also increase public awareness and understanding of mental disorders by providing accurate and reliable information that can be used for education, advocacy, prevention, intervention, etc.

- -

If you want to download buku ppdgj iii pdf files or learn more about them, you can use the links and resources provided in this article. You can also use the tips and guidelines provided in this article to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference. This will help you increase your website's visibility and traffic, as well as your credibility and authority in your field.

- -

We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to contact us. Thank you for reading.

3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md b/spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md
deleted file mode 100644
index 2398f64fa213ef4e3dbdefe79b0bca3e41204b33..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md
+++ /dev/null
@@ -1,6 +0,0 @@

(2011) Crack.PhotoElf.4.1.12 11


Download File 🗸🗸🗸 https://imgfil.com/2uxYnd



- - 3cee63e6c2
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md b/spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md deleted file mode 100644 index 49d8b871a43050705a0494a02cc965b2e09524f8..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md +++ /dev/null @@ -1,9 +0,0 @@ -

application x-msdownload how to open


Download 🆓 https://imgfil.com/2uxXnL



- -. KDE office; Microsoft Office Modeling; open office; Other Adobe applications. app/vnd.ms-cab-compressed . application/x-apple-diskimage. Download Adobe Acrobat Reader DC for Windows in Russian without registration and SMS from the link below. -Acrobat Reader for Windows 10 in Russian via a direct link from the official website without registration and SMS. -Download Adobe Acrobat Reader DC for free for Windows 7 in Russian without registration and SMS using the direct link below. -Adobe Reader DC 2019. 8a78ff9644
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md b/spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md deleted file mode 100644 index 0efe8d0e884a958411a6eb8d4d1be0d3092fa724..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md +++ /dev/null @@ -1,6 +0,0 @@ -

archicad 16 crack download mega


Download Ziphttps://imgfil.com/2uxX4v



-
-Install full Archicad 16 (video lesson, September 7, 2015). In the ArchiCAD Video Lessons section, you can watch video tutorials on working with ArchiCAD 16, which cover the basic working methods, such as creating and editing objects, creating walls, creating windows and doors, designing a roof, facade elements, and creating and editing interior walls and partitions. The lesson uses an example project in which a frame house is built. Video tutorials Archicad 16. 8a78ff9644
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md b/spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md deleted file mode 100644 index 5e6cf2f8315de4ca92ee0a156db6b0955e2dda16..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md +++ /dev/null @@ -1,20 +0,0 @@ -

Casio Fx 880p Emulator


DOWNLOAD ○○○ https://imgfil.com/2uxYzF



-
-casio fx 880p emulator - Length 00:10:42 - Size 9.42 MB
-casio fx 880p emulator free convert to mp3 - Length 00:10:24 - Size 9.04 MB
-casio fx 880p emulator master cart - Length 00:25:47 - Size 23.33 MB 4fefd39f24
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md b/spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md deleted file mode 100644 index 3ac06d34dae107abad7c4862bf2fc4fc2aa12db9..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md +++ /dev/null @@ -1,6 +0,0 @@ -

Coffee Crisis Download For Pc [torrent Full]


Download Zip ✵✵✵ https://imgfil.com/2uxZLq



- -Coffee Crisis is an arcade-style beat 'em up full of caffeinated carnage! ... The AI codenamed DUDE lies deep in the computer core of a long forgotten laboratory. 4d29de3e1b
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md deleted file mode 100644 index 73017f17da23fc2a79fa344705f8fb8ef4558a8f..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md +++ /dev/null @@ -1,13 +0,0 @@ -

emicsoft vob converter 4.1.20 REGISTRATION CODE.rar


Download Filehttps://imgfil.com/2uxZua



- -16 April 2021 — Online unit converter, October 21, 2021 01:45 AM . .html]emicsoft vob convertor 4.1.20 REGISTRATION CODE.rar[/url] royarborbert .. Converters: Unit converter — Unit converter (Unit converter) ( download ) — Unit converter. . -Unit converter. -On this page you can download the unit converter. -This program allows you to convert values ​​from different systems. -Unit Converter - Download Unit Converter for free. -Unit Converter - A program for converting values ​​from one dimensional system to another. -Unit converter . -Download Unit converter . (for Windows), Unit Converter (For MAC OS) . 8a78ff9644
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md b/spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md deleted file mode 100644 index 41da83a8d470ea7b29dffa7db24bb3bb2eda6ef4..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md +++ /dev/null @@ -1,19 +0,0 @@ - -

This link is for the Clarett interfaces with a Thunderbolt connection and the Clarett OctoPre.

The serial numbers for the Clarett range begin with a U.

-

Focusrite Serial Number


DOWNLOAD ····· https://imgfil.com/2uxYaB



-

The names of the different compressor and EQ patches only provide clues as to what they emulate, but see the 'Behind The Mask' box for a list of the real-life models that were researched in their creation. These range from expensive and rare outboard to popular consoles and workhorse rack gear, and the selection on offer is certainly impressive, covering many of the biggest and best-respected names in recording hardware from both the UK and the US, including a number of Focusrite's own products.

-

The Liquid Mix Manager software allows you to set the unit's sample rate, and also to specify the maximum number of channels of processing you wish to use, in case you want to conserve Firewire bandwidth.Operating the Liquid Mix is gratifyingly straightforward, and though some users might wish for photorealistic emulations of every plug-in control surface, there's a lot to be said for having a consistent control style. This being said, I personally find it less helpful when the normal layout is changed to reflect some of the oddities of the original, such as putting the HF controls on the left or having frequency controls that work backwards. While I'm making small gripes, I couldn't get the Snapshot facility to work in Logic, though the normal Save and Load menu does essentially the same thing, and does work. The compressor and EQ settings are always loaded together, though, and I feel it would make more sense to also have them available separately.

-

Sonically, I'm not able to vouch for the degree of authenticity of all the emulations, but there are some very nice-sounding EQs and compressors available covering a wide range of distinctive characters and styles. I particularly liked some of the more subtle passive EQs that seem to sweeten a mix or track with very little adjustment, and of course there are those kick-ass optical compressors that contrast nicely with the more workmanlike VCA versions. For the less experienced user, deciding which to use may present a challenge, but at the same time Liquid Mix offers a wonderful educational opportunity for any aspiring engineer to get familiar with the essential character of a number of classic compressors and equalisers that they may otherwise never come across.

-

We have had some reports of registration problems, but have been unable to reproduce the problem. Please send your username (anthonylavoie) along with your iLok account, plug-in serial number and key to support@eventide.com and we'll make sure we get you registered.

-

-

Free assistance is available for the first 60 days on new purchases, excluding internal hardware installations or networking support. Your invoice reference number will be required for free assistance.

-


Registration: 7/10
First thing was to register it. It didn't accept my information the first 2 times online when I filled out the Bundle serial number part, but the third time it took the information and registered it as a product I own. Slightly frustrating, but I stuck with it and it worked eventually. It's a confusing series of jumps and it kept forcing me to re-sign in, then complained I was already signed in and that my Bundle ID wasn't recognized when it was exactly right. I typed it in the first 2 times and the 3rd successful time was just a copy/paste of the second attempt.

-

The second major advantage of Thunderbolt for audio purposes is the lower levels of latency that are achievable using this protocol. Thunderbolt connects straight through to the PCIe layer, as opposed to USB which must go through a number of stages first (each stage adding additional latency).

-

What Generation of Scarlett Solo you got?
The serial number of your Scarlett will be found on the underside, either on a sticker or etched on the casing. The prefix of your serial number will denote which generation Scarlett you own:

-

U. Zanghieri presented a scheme for carrying AES3 signals on the "spare" pairs (4-5 and 7-8) of Ethernet 100Base-TX in a system where an audio source and a number of destination devices (such as powered loudspeakers) are connected in a ring, so that the system can survive loss of any one of the links. The Ethernet connection is used purely for control, as the latency through each unit is much higher than on the AES3 connection. A Project Initiation Request will be submitted in due course.

-

Apple has informed its official retail stores, AppleCare employees, and authorized resellers that a small number of third-generation Apple TV units have WiFi issues. These issues surround not being able to locate a WiFi network, unable to join a network, and dropped or intermittent connections.

-

Apple has determined that a very small number of Apple TV (3rd generation) products might experience one of these Wi-Fi related connectivity issues: Cannot locate network, Unable to join network, Dropped or intermittent connection.

-

Apple, which works with suppliers to test new designs all the time, has been testing various TV prototypes for a number of years, according to people familiar with the efforts. The company generally tests and develops products internally before doing so with outside suppliers.

-

In Spain, the lack of podcasting data, and existing conflicting numbers, are holding the medium back, says a well-researched article published in TELOS Magazine. It quotes podcaster Francisco Izuzquiza:

aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md b/spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md
deleted file mode 100644
index 462262c238dec20422ec17ca16e342270a592f1c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md
+++ /dev/null
@@ -1,86 +0,0 @@
-

Download Supreme Duelist Stickman APK 2022: A Fun and Crazy Stickman Game

-

If you are looking for a fun and crazy stickman game to play on your Android device, you should download Supreme Duelist Stickman APK 2022. This is a popular stickman game that lets you fight against other stickmen in various modes and maps. You can also customize your character with different weapons and outfits, and enjoy realistic ragdoll physics. Whether you want to play solo or with your friends, Supreme Duelist Stickman APK 2022 will keep you entertained for hours.

-

download supreme duelist stickman apk 2022


DOWNLOAD ····· https://jinyurl.com/2uNKEJ



-

Features of Supreme Duelist Stickman APK 2022

-

Supreme Duelist Stickman APK 2022 has many features that make it one of the best stickman games on the market. Here are some of them:

- -

How to Download Supreme Duelist Stickman APK 2022

-

Downloading Supreme Duelist Stickman APK 2022 is very easy and fast. Just follow these simple steps:

-
    -
  1. Go to the official Google Play Store link or click here.
  2. Tap on the Install button and wait for the download to finish.
  3. Open the app and enjoy the game.
-

Tips and Tricks for Supreme Duelist Stickman APK 2022

-

If you want to master Supreme Duelist Stickman APK 2022, you should know some tips and tricks that will help you improve your performance and have more fun. Here are some of them:

- -

Conclusion

-

Supreme Duelist Stickman APK 2022 is a fun and crazy stickman game that you should download and play on your Android device. It has many features that make it one of the best stickman games on the market, such as mini game mode, boss fight tournament mode, ragdoll physics, customizable characters, various modes and maps, and more. It is also easy to download and install, as long as you use the official Google Play Store link. Whether you want to play solo or with your friends, Supreme Duelist Stickman APK 2022 will keep you entertained for hours.

-


-

FAQs

- -

I hope you enjoyed this article and found it helpful. If you have any questions or comments, please feel free to leave them below. Thank you for reading and have a great day!

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py b/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py deleted file mode 100644 index aaf4fc4a10e35b85c794793424a1e1f10698838b..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py +++ /dev/null @@ -1,259 +0,0 @@ -import copy -from abc import ABCMeta, abstractmethod -from typing import List, Optional - -import numpy as np - -from .. import full_context_label -from ..full_context_label import extract_full_context_label -from ..model import AccentPhrase, AudioQuery, Mora -from ..mora_list import openjtalk_mora2text - - -def mora_to_text(mora: str) -> str: - if mora[-1:] in ["A", "I", "U", "E", "O"]: - # 無声化母音を小文字に - mora = mora[:-1] + mora[-1].lower() - if mora in openjtalk_mora2text: - return openjtalk_mora2text[mora] - else: - return mora - - -def adjust_interrogative_accent_phrases( - accent_phrases: List[AccentPhrase], -) -> List[AccentPhrase]: - """ - enable_interrogative_upspeakが有効になっていて与えられたaccent_phrasesに疑問系のものがあった場合、 - 各accent_phraseの末尾にある疑問系発音用のMoraに対して直前のMoraより少し音を高くすることで疑問文ぽくする - NOTE: リファクタリング時に適切な場所へ移動させること - """ - return [ - AccentPhrase( - moras=adjust_interrogative_moras(accent_phrase), - accent=accent_phrase.accent, - pause_mora=accent_phrase.pause_mora, - is_interrogative=accent_phrase.is_interrogative, - ) - for accent_phrase in accent_phrases - ] - - -def adjust_interrogative_moras(accent_phrase: AccentPhrase) -> List[Mora]: - moras = copy.deepcopy(accent_phrase.moras) - if accent_phrase.is_interrogative and not (len(moras) == 0 or moras[-1].pitch == 0): - interrogative_mora = make_interrogative_mora(moras[-1]) - moras.append(interrogative_mora) - return moras - else: - return moras - - -def make_interrogative_mora(last_mora: Mora) -> Mora: - fix_vowel_length = 0.15 - adjust_pitch = 0.3 - max_pitch = 6.5 - return Mora( - text=openjtalk_mora2text[last_mora.vowel], - consonant=None, - consonant_length=None, - vowel=last_mora.vowel, - vowel_length=fix_vowel_length, - pitch=min(last_mora.pitch + adjust_pitch, max_pitch), - ) - - -def full_context_label_moras_to_moras( - full_context_moras: List[full_context_label.Mora], -) -> List[Mora]: - return [ - Mora( - text=mora_to_text("".join([p.phoneme for p in mora.phonemes])), - consonant=(mora.consonant.phoneme if mora.consonant is not None else None), - consonant_length=0 if mora.consonant is not None else None, - vowel=mora.vowel.phoneme, - vowel_length=0, - pitch=0, - ) - for mora in full_context_moras - ] - - -class SynthesisEngineBase(metaclass=ABCMeta): - # FIXME: jsonではなくModelを返すようにする - @property - @abstractmethod - def speakers(self) -> str: - raise NotImplementedError - - @property - @abstractmethod - def supported_devices(self) -> Optional[str]: - raise NotImplementedError - - def initialize_speaker_synthesis( # noqa: B027 - self, speaker_id: int, skip_reinit: bool - ): - - """ - 指定した話者での音声合成を初期化する。何度も実行可能。 - 未実装の場合は何もしない - Parameters - ---------- - speaker_id : int - 話者ID - skip_reinit : bool - True の場合, 既に初期化済みの話者の再初期化をスキップします - """ - pass - - def is_initialized_speaker_synthesis(self, speaker_id: int) -> bool: - """ - 指定した話者での音声合成が初期化されているかどうかを返す - Parameters - ---------- - speaker_id : int - 話者ID - Returns - ------- - bool - 初期化されているかどうか - """ - return True - - @abstractmethod - def replace_phoneme_length( - self, accent_phrases: List[AccentPhrase], speaker_id: int - ) -> 
List[AccentPhrase]: - """ - accent_phrasesの母音・子音の長さを設定する - Parameters - ---------- - accent_phrases : List[AccentPhrase] - アクセント句モデルのリスト - speaker_id : int - 話者ID - Returns - ------- - accent_phrases : List[AccentPhrase] - 母音・子音の長さが設定されたアクセント句モデルのリスト - """ - raise NotImplementedError() - - @abstractmethod - def replace_mora_pitch( - self, accent_phrases: List[AccentPhrase], speaker_id: int - ) -> List[AccentPhrase]: - """ - accent_phrasesの音高(ピッチ)を設定する - Parameters - ---------- - accent_phrases : List[AccentPhrase] - アクセント句モデルのリスト - speaker_id : int - 話者ID - Returns - ------- - accent_phrases : List[AccentPhrase] - 音高(ピッチ)が設定されたアクセント句モデルのリスト - """ - raise NotImplementedError() - - def replace_mora_data( - self, - accent_phrases: List[AccentPhrase], - speaker_id: int, - ) -> List[AccentPhrase]: - return self.replace_mora_pitch( - accent_phrases=self.replace_phoneme_length( - accent_phrases=accent_phrases, - speaker_id=speaker_id, - ), - speaker_id=speaker_id, - ) - - def create_accent_phrases(self, text: str, speaker_id: int) -> List[AccentPhrase]: - if len(text.strip()) == 0: - return [] - - utterance = extract_full_context_label(text) - if len(utterance.breath_groups) == 0: - return [] - - accent_phrases = self.replace_mora_data( - accent_phrases=[ - AccentPhrase( - moras=full_context_label_moras_to_moras(accent_phrase.moras), - accent=accent_phrase.accent, - pause_mora=( - Mora( - text="、", - consonant=None, - consonant_length=None, - vowel="pau", - vowel_length=0, - pitch=0, - ) - if ( - i_accent_phrase == len(breath_group.accent_phrases) - 1 - and i_breath_group != len(utterance.breath_groups) - 1 - ) - else None - ), - is_interrogative=accent_phrase.is_interrogative, - ) - for i_breath_group, breath_group in enumerate(utterance.breath_groups) - for i_accent_phrase, accent_phrase in enumerate( - breath_group.accent_phrases - ) - ], - speaker_id=speaker_id, - ) - return accent_phrases - - def synthesis( - self, - query: AudioQuery, - speaker_id: int, - enable_interrogative_upspeak: bool = True, - ) -> np.ndarray: - """ - 音声合成クエリ内の疑問文指定されたMoraを変形した後、 - 継承先における実装`_synthesis_impl`を使い音声合成を行う - Parameters - ---------- - query : AudioQuery - 音声合成クエリ - speaker_id : int - 話者ID - enable_interrogative_upspeak : bool - 疑問系のテキストの語尾を自動調整する機能を有効にするか - Returns - ------- - wave : numpy.ndarray - 音声合成結果 - """ - # モーフィング時などに同一参照のqueryで複数回呼ばれる可能性があるので、元の引数のqueryに破壊的変更を行わない - query = copy.deepcopy(query) - if enable_interrogative_upspeak: - query.accent_phrases = adjust_interrogative_accent_phrases( - query.accent_phrases - ) - return self._synthesis_impl(query, speaker_id) - - @abstractmethod - def _synthesis_impl(self, query: AudioQuery, speaker_id: int) -> np.ndarray: - """ - 音声合成クエリから音声合成に必要な情報を構成し、実際に音声合成を行う - Parameters - ---------- - query : AudioQuery - 音声合成クエリ - speaker_id : int - 話者ID - Returns - ------- - wave : numpy.ndarray - 音声合成結果 - """ - raise NotImplementedError() diff --git a/spaces/801artistry/RVC801/lib/infer_pack/models_dml.py b/spaces/801artistry/RVC801/lib/infer_pack/models_dml.py deleted file mode 100644 index 958d7b29259763d2fea94caf8ba7e314c4a77d05..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/lib/infer_pack/models_dml.py +++ /dev/null @@ -1,1124 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding 
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - 
self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold 
= 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv.float() - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, 
length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class 
SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - 
upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - 
self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - 
p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else 
spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/__init__.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py b/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py deleted file mode 100644 index 1aef6493c90c7cf5206ff92f7fe8831a0821664f..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py +++ /dev/null @@ -1,196 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn - - -class Discriminator2DFactory(nn.Module): - def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128, - norm_type='bn', reduction='sum'): - super(Discriminator2DFactory, self).__init__() - padding = (kernel[0] // 2, kernel[1] // 2) - - def discriminator_block(in_filters, out_filters, first=False): - """ - Input: (B, in, 2H, 2W) - Output:(B, out, H, W) - """ - conv = nn.Conv2d(in_filters, out_filters, kernel, (2, 2), padding) - if norm_type == 'sn': - conv = nn.utils.spectral_norm(conv) - block = [ - conv, # padding = kernel//2 - nn.LeakyReLU(0.2, inplace=True), - nn.Dropout2d(0.25) - ] - if norm_type == 'bn' and not first: - 
block.append(nn.BatchNorm2d(out_filters, 0.8)) - if norm_type == 'in' and not first: - block.append(nn.InstanceNorm2d(out_filters, affine=True)) - block = nn.Sequential(*block) - return block - - self.model = nn.ModuleList([ - discriminator_block(c_in, hidden_size, first=True), - discriminator_block(hidden_size, hidden_size), - discriminator_block(hidden_size, hidden_size), - ]) - - self.reduction = reduction - ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3) - if reduction != 'none': - # The height and width of downsampled image - self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1) - else: - self.adv_layer = nn.Linear(hidden_size * ds_size[1], 1) - - def forward(self, x): - """ - - :param x: [B, C, T, n_bins] - :return: validity: [B, 1], h: List of hiddens - """ - h = [] - for l in self.model: - x = l(x) - h.append(x) - if self.reduction != 'none': - x = x.view(x.shape[0], -1) - validity = self.adv_layer(x) # [B, 1] - else: - B, _, T_, _ = x.shape - x = x.transpose(1, 2).reshape(B, T_, -1) - validity = self.adv_layer(x)[:, :, 0] # [B, T] - return validity, h - - -class MultiWindowDiscriminator(nn.Module): - def __init__(self, time_lengths, cond_size=0, freq_length=80, kernel=(3, 3), - c_in=1, hidden_size=128, norm_type='bn', reduction='sum'): - super(MultiWindowDiscriminator, self).__init__() - self.win_lengths = time_lengths - self.reduction = reduction - - self.conv_layers = nn.ModuleList() - if cond_size > 0: - self.cond_proj_layers = nn.ModuleList() - self.mel_proj_layers = nn.ModuleList() - for time_length in time_lengths: - conv_layer = [ - Discriminator2DFactory( - time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size, - norm_type=norm_type, reduction=reduction) - ] - self.conv_layers += conv_layer - if cond_size > 0: - self.cond_proj_layers.append(nn.Linear(cond_size, freq_length)) - self.mel_proj_layers.append(nn.Linear(freq_length, freq_length)) - - def forward(self, x, x_len, cond=None, start_frames_wins=None): - ''' - Args: - x (tensor): input mel, (B, c_in, T, n_bins). - x_length (tensor): len of per mel. (B,). - - Returns: - tensor : (B). - ''' - validity = [] - if start_frames_wins is None: - start_frames_wins = [None] * len(self.conv_layers) - h = [] - for i, start_frames in zip(range(len(self.conv_layers)), start_frames_wins): - x_clip, c_clip, start_frames = self.clip( - x, cond, x_len, self.win_lengths[i], start_frames) # (B, win_length, C) - start_frames_wins[i] = start_frames - if x_clip is None: - continue - if cond is not None: - x_clip = self.mel_proj_layers[i](x_clip) # (B, 1, win_length, C) - c_clip = self.cond_proj_layers[i](c_clip)[:, None] # (B, 1, win_length, C) - x_clip = x_clip + c_clip - x_clip, h_ = self.conv_layers[i](x_clip) - h += h_ - validity.append(x_clip) - if len(validity) != len(self.conv_layers): - return None, start_frames_wins, h - if self.reduction == 'sum': - validity = sum(validity) # [B] - elif self.reduction == 'stack': - validity = torch.stack(validity, -1) # [B, W_L] - elif self.reduction == 'none': - validity = torch.cat(validity, -1) # [B, W_sum] - return validity, start_frames_wins, h - - def clip(self, x, cond, x_len, win_length, start_frames=None): - '''Ramdom clip x to win_length. - Args: - x (tensor) : (B, c_in, T, n_bins). - cond (tensor) : (B, T, H). - x_len (tensor) : (B,). - win_length (int): target clip length - - Returns: - (tensor) : (B, c_in, win_length, n_bins). 
- - ''' - T_start = 0 - T_end = x_len.max() - win_length - if T_end < 0: - return None, None, start_frames - T_end = T_end.item() - if start_frames is None: - start_frame = np.random.randint(low=T_start, high=T_end + 1) - start_frames = [start_frame] * x.size(0) - else: - start_frame = start_frames[0] - x_batch = x[:, :, start_frame: start_frame + win_length] - c_batch = cond[:, start_frame: start_frame + win_length] if cond is not None else None - return x_batch, c_batch, start_frames - - -class Discriminator(nn.Module): - def __init__(self, time_lengths=[32, 64, 128], freq_length=80, cond_size=0, kernel=(3, 3), c_in=1, - hidden_size=128, norm_type='bn', reduction='sum', uncond_disc=True): - super(Discriminator, self).__init__() - self.time_lengths = time_lengths - self.cond_size = cond_size - self.reduction = reduction - self.uncond_disc = uncond_disc - if uncond_disc: - self.discriminator = MultiWindowDiscriminator( - freq_length=freq_length, - time_lengths=time_lengths, - kernel=kernel, - c_in=c_in, hidden_size=hidden_size, norm_type=norm_type, - reduction=reduction - ) - if cond_size > 0: - self.cond_disc = MultiWindowDiscriminator( - freq_length=freq_length, - time_lengths=time_lengths, - cond_size=cond_size, - kernel=kernel, - c_in=c_in, hidden_size=hidden_size, norm_type=norm_type, - reduction=reduction - ) - - def forward(self, x, cond=None, start_frames_wins=None): - """ - - :param x: [B, T, 80] - :param cond: [B, T, cond_size] - :param return_y_only: - :return: - """ - if len(x.shape) == 3: - x = x[:, None, :, :] - x_len = x.sum([1, -1]).ne(0).int().sum([-1]) - ret = {'y_c': None, 'y': None} - if self.uncond_disc: - ret['y'], start_frames_wins, ret['h'] = self.discriminator( - x, x_len, start_frames_wins=start_frames_wins) - if self.cond_size > 0 and cond is not None: - ret['y_c'], start_frames_wins, ret['h_c'] = self.cond_disc( - x, x_len, cond, start_frames_wins=start_frames_wins) - ret['start_frames_wins'] = start_frames_wins - return ret \ No newline at end of file diff --git a/spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py b/spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py deleted file mode 100644 index 62e7b60344f5957e86a9c0de3d77985f68b52224..0000000000000000000000000000000000000000 --- a/spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py +++ /dev/null @@ -1,24 +0,0 @@ -import time -import gradio as gr -import os - -def load_mesh(mesh_file_name): - return mesh_file_name, mesh_file_name - -demo = gr.Interface( - fn=load_mesh, - inputs=gr.Model3D(), - outputs=[ - gr.Model3D( - clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"), - gr.File(label="Download 3D Model") - ], - examples=[ - [os.path.join(os.path.dirname(__file__), "files/Duck.glb")], - [os.path.join(os.path.dirname(__file__), "files/rubber_duck.glb")], - [os.path.join(os.path.dirname(__file__), "files/GroundVehicle.glb")] - ], -) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md b/spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md deleted file mode 100644 index bfb017eaa27c3c16b5d4d4f0aa0d3ed2d3c1ec17..0000000000000000000000000000000000000000 --- a/spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Abdulkader T5 MedRepAnalyzer -emoji: ⚡ -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.13.0 -app_file: app.py -pinned: false -license: cc-by-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css b/spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css deleted file mode 100644 index 3c2010d25065fbef63b104df743ef72c00259871..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css +++ /dev/null @@ -1,38 +0,0 @@ -.stop-generating { - position: absolute; - bottom: 128px; - left: 50%; - transform: translateX(-50%); - z-index: 1000000; -} - -.stop-generating button { - backdrop-filter: blur(20px); - -webkit-backdrop-filter: blur(20px); - background-color: var(--blur-bg); - color: var(--colour-3); - cursor: pointer; - animation: show_popup 0.4s; -} - -@keyframes show_popup { - from { - opacity: 0; - transform: translateY(10px); - } -} - -@keyframes hide_popup { - to { - opacity: 0; - transform: translateY(10px); - } -} - -.stop-generating-hiding button { - animation: hide_popup 0.4s; -} - -.stop-generating-hidden button { - display: none; -} diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js deleted file mode 100644 index 7846c01e338c06be4538e532771e6f8a9fbb0fdb..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js +++ /dev/null @@ -1,2 +0,0 @@ -import LZString from './string/lzstring/LZString.js'; -export default LZString; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js deleted file mode 100644 index c244780d1854d087100eb69ce3aa6dcffb119089..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js +++ /dev/null @@ -1,2 +0,0 @@ -import DynamicText from '../../../plugins/dynamictext.js'; -export default DynamicText; \ No newline at end of file diff --git a/spaces/Ajit025/Text_to_Image_conversion/text_to_image.py b/spaces/Ajit025/Text_to_Image_conversion/text_to_image.py deleted file mode 100644 index 3e758a1a6bfd6f0a178e20fea0e8bfac04fc1f3f..0000000000000000000000000000000000000000 --- a/spaces/Ajit025/Text_to_Image_conversion/text_to_image.py +++ /dev/null @@ -1,51 +0,0 @@ -from transformers.tools.base import Tool, get_default_device -from transformers.utils import is_accelerate_available -import torch - -from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler - - -TEXT_TO_IMAGE_DESCRIPTION = ( - "This is a tool that creates an image according to a prompt, which is a text description. It takes an input named `prompt` which " - "contains the image description and outputs an image." 
-) - - -class TextToImageTool(Tool): - default_checkpoint = "runwayml/stable-diffusion-v1-5" - description = TEXT_TO_IMAGE_DESCRIPTION - inputs = ['text'] - outputs = ['image'] - - def __init__(self, device=None, **hub_kwargs) -> None: - if not is_accelerate_available(): - raise ImportError("Accelerate should be installed in order to use tools.") - - super().__init__() - - self.device = device - self.pipeline = None - self.hub_kwargs = hub_kwargs - - def setup(self): - if self.device is None: - self.device = get_default_device() - - self.pipeline = DiffusionPipeline.from_pretrained(self.default_checkpoint) - self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config) - self.pipeline.to(self.device) - - if self.device.type == "cuda": - self.pipeline.to(torch_dtype=torch.float16) - - self.is_initialized = True - - def __call__(self, prompt): - if not self.is_initialized: - self.setup() - - negative_prompt = "low quality, bad quality, deformed, low resolution" - added_prompt = " , highest quality, highly realistic, very high resolution" - - return self.pipeline(prompt + added_prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0] - diff --git a/spaces/AkitoP/umamusume_bert_vits2/data_utils.py b/spaces/AkitoP/umamusume_bert_vits2/data_utils.py deleted file mode 100644 index d8e6b9e30b90839644e8a2c33c5166288b720d02..0000000000000000000000000000000000000000 --- a/spaces/AkitoP/umamusume_bert_vits2/data_utils.py +++ /dev/null @@ -1,406 +0,0 @@ -import os -import random -import torch -import torch.utils.data -from tqdm import tqdm -from loguru import logger -import commons -from mel_processing import spectrogram_torch, mel_spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import cleaned_text_to_sequence, get_bert - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.spk_map = hparams.spk2id - self.hparams = hparams - - self.use_mel_spec_posterior = getattr( - hparams, "use_mel_posterior_encoder", False - ) - if self.use_mel_spec_posterior: - self.n_mel_channels = getattr(hparams, "n_mel_channels", 80) - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 300) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - skipped = 0 - logger.info("Init dataset...") - for _id, spk, language, text, phones, tone, word2ph in tqdm( - self.audiopaths_sid_text - ): - audiopath = f"{_id}" - if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len: - phones = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - audiopaths_sid_text_new.append( - [audiopath, spk, language, text, phones, tone, word2ph] - ) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - else: - skipped += 1 - logger.info( - "skipped: " - + str(skipped) - + ", total: " - + str(len(self.audiopaths_sid_text)) - ) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text - - bert, ja_bert, phones, tone, language = self.get_text( - text, word2ph, phones, tone, language, audiopath - ) - - spec, wav = self.get_audio(audiopath) - sid = torch.LongTensor([int(self.spk_map[sid])]) - return (phones, spec, wav, sid, tone, language, bert, ja_bert) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError( - "{} {} SR doesn't match target {} SR".format( - filename, sampling_rate, self.sampling_rate - ) - ) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if self.use_mel_spec_posterior: - spec_filename = spec_filename.replace(".spec.pt", ".mel.pt") - try: - spec = torch.load(spec_filename) - except: - if self.use_mel_spec_posterior: - spec = mel_spectrogram_torch( - audio_norm, - self.filter_length, - self.n_mel_channels, - self.sampling_rate, - self.hop_length, - self.win_length, - self.hparams.mel_fmin, - self.hparams.mel_fmax, - center=False, - ) - else: - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text, word2ph, phone, tone, language_str, wav_path): - phone, tone, language 
= cleaned_text_to_sequence(phone, tone, language_str) - if self.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - torch.save(bert, bert_path) - assert bert.shape[-1] == len(phone), phone - - if language_str == "ZH": - bert = bert - ja_bert = torch.zeros(768, len(phone)) - elif language_str == "JP": - ja_bert = bert - bert = torch.zeros(1024, len(phone)) - else: - bert = torch.zeros(1024, len(phone)) - ja_bert = torch.zeros(768, len(phone)) - assert bert.shape[-1] == len(phone), ( - bert.shape, - len(phone), - sum(word2ph), - p1, - p2, - t1, - t2, - pold, - pold2, - word2ph, - text, - w2pho, - ) - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - return bert, ja_bert, phone, tone, language - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate: - """Zero-pads model inputs and targets""" - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True - ) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - tone_padded = torch.LongTensor(len(batch), max_text_len) - language_padded = torch.LongTensor(len(batch), max_text_len) - bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len) - ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len) - - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - tone_padded.zero_() - language_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - bert_padded.zero_() - ja_bert_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, : text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, : spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, : wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - tone = row[4] - tone_padded[i, : tone.size(0)] = tone - - language = row[5] - language_padded[i, : language.size(0)] = language - - bert = row[6] - bert_padded[i, :, : bert.size(1)] = bert - - ja_bert = row[7] - ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert - - return ( - text_padded, - text_lengths, - spec_padded, - 
spec_lengths, - wav_padded, - wav_lengths, - sid, - tone_padded, - language_padded, - bert_padded, - ja_bert_padded, - ) - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. - """ - - def __init__( - self, - dataset, - batch_size, - boundaries, - num_replicas=None, - rank=None, - shuffle=True, - ): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - try: - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - assert all(len(bucket) > 0 for bucket in buckets) - # When one bucket is not traversed - except Exception as e: - print("Bucket warning ", e) - for i in range(len(buckets) - 1, -1, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = ( - total_batch_size - (len_bucket % total_batch_size) - ) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - if len_bucket == 0: - continue - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ( - ids_bucket - + ids_bucket * (rem // len_bucket) - + ids_bucket[: (rem % len_bucket)] - ) - - # subsample - ids_bucket = ids_bucket[self.rank :: self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [ - bucket[idx] - for idx in ids_bucket[ - j * self.batch_size : (j + 1) * self.batch_size - ] - ] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - 
return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md b/spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md deleted file mode 100644 index 716dc7f58d2a5a31d2da2879aa599ebf438b82cd..0000000000000000000000000000000000000000 --- a/spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Implicit And Explicit Aspects Extraction In Restaurant Reviews Domain -emoji: 🌖 -colorFrom: gray -colorTo: red -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" "b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" deleted file mode 100644 index 94ef256327f6740cdaddc6f5ecea5852a9210163..0000000000000000000000000000000000000000 --- "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" +++ /dev/null @@ -1,106 +0,0 @@ -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import CatchException, report_execption, write_results_to_file -from toolbox import update_ui - -def get_meta_information(url, chatbot, history): - import requests - import arxiv - import difflib - from bs4 import BeautifulSoup - from toolbox import get_conf - proxies, = get_conf('proxies') - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36', - } - # 发送 GET 请求 - response = requests.get(url, proxies=proxies, headers=headers) - - # 解析网页内容 - soup = BeautifulSoup(response.text, "html.parser") - - def string_similar(s1, s2): - return difflib.SequenceMatcher(None, s1, s2).quick_ratio() - - profile = [] - # 获取所有文章的标题和作者 - for result in soup.select(".gs_ri"): - title = result.a.text.replace('\n', ' ').replace(' ', ' ') - author = result.select_one(".gs_a").text - try: - citation = result.select_one(".gs_fl > a[href*='cites']").text # 引用次数是链接中的文本,直接取出来 - except: - citation = 'cited by 0' - abstract = result.select_one(".gs_rs").text.strip() # 摘要在 .gs_rs 中的文本,需要清除首尾空格 - search = arxiv.Search( - query = title, - max_results = 1, - sort_by = arxiv.SortCriterion.Relevance, - ) - paper = next(search.results()) - if string_similar(title, paper.title) > 0.90: # same paper - abstract = paper.summary.replace('\n', ' ') - is_paper_in_arxiv = True - else: # different paper - abstract = abstract - is_paper_in_arxiv = False - paper = next(search.results()) - print(title) - print(author) - print(citation) - profile.append({ - 'title':title, - 'author':author, - 'citation':citation, - 'abstract':abstract, - 'is_paper_in_arxiv':is_paper_in_arxiv, - }) - - chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract] - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - return profile - -@CatchException -def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 
基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import arxiv - from bs4 import BeautifulSoup - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - meta_paper_info_list = yield from get_meta_information(txt, chatbot, history) - - if len(meta_paper_info_list[:10]) > 0: - i_say = "下面是一些学术文献的数据,请从中提取出以下内容。" + \ - "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \ - f"以下是信息源:{str(meta_paper_info_list[:10])}" - - inputs_show_user = f"请分析此页面中出现的所有文章:{txt}" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown格式。你必须逐个文献进行处理。" - ) - - history.extend([ "第一批", gpt_say ]) - meta_paper_info_list = meta_paper_info_list[10:] - - chatbot.append(["状态?", "已经全部完成"]) - msg = '正常' - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)); - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 diff --git a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex b/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex deleted file mode 100644 index 1baa8915f4cf7aec2520894a87470fc9436d954b..0000000000000000000000000000000000000000 --- a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex +++ /dev/null @@ -1,18 +0,0 @@ -Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. - -Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples. -%\marginpar{not sure if the memory constraints are understandable here} -Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains. 
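Written out, the recurrence described above is simply
\[
  h_t = f\bigl(h_{t-1},\, x_t\bigr), \qquad t = 1, \dots, n,
\]
with $x_t$ the input at position $t$ and $f$ the cell update (e.g.\ an LSTM or GRU). This makes the sequential dependency explicit: $h_t$ cannot be computed before $h_{t-1}$ is available, so the time steps of a single training example cannot be processed in parallel.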
- -%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away} - -Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network. - -%\marginpar{not sure if "cross-positional communication" is understandable without explanation} -%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?} - -In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs. -%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.} - -% Just a standard paragraph with citations, rewrite. -%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. 
In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do. \ No newline at end of file diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py deleted file mode 100644 index a8b7d55e6d190c193e427bd8d623c583b2dcdeda..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2019, NVIDIA Corporation. All rights reserved. -# This work is made available under the Nvidia Source Code License-NC. -# To view a copy of this license, visit -# https://nvlabs.github.io/stylegan2/license.html - - -## this script is for generating images from pre-trained network based on StyleGAN1 (TensorFlow) and StyleGAN2-ada (PyTorch) ## - -import os -import click -import dnnlib -import numpy as np -import PIL.Image -import legacy -from typing import List, Optional - -""" -Generate images using pretrained network pickle. -Examples: - -\b -# Generate human full-body images without truncation -python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 \\ - --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2 - -\b -# Generate human full-body images with truncation -python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=0.8 --seeds=0-100\\ - --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2 - -# \b -# Generate human full-body images using stylegan V1 -# python generate.py --outdir=outputs/generate/stylegan_human_v1_1024 \\ -# --network=pretrained_models/stylegan_human_v1_1024.pkl --version 1 -""" - - -@click.command() -@click.pass_context -@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) -@click.option('--seeds', type=legacy.num_range, help='List of random seeds') -@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) -@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True) -@click.option('--outdir', help='Where to save the output images', default='outputs/generate/', type=str, required=True, metavar='DIR') -@click.option('--version', help="stylegan version, 1, 2 or 3", type=int, default=2) -def generate_images( - ctx: click.Context, - network_pkl: str, - seeds: Optional[List[int]], - truncation_psi: float, - noise_mode: str, - outdir: str, - version: int -): - - print('Loading networks from "%s"...' % network_pkl) - if version == 1: - import dnnlib.tflib as tflib - tflib.init_tf() - G, D, Gs = legacy.load_pkl(network_pkl) - - else: - import torch - device = torch.device('cuda') - with dnnlib.util.open_url(network_pkl) as f: - G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore - os.makedirs(outdir, exist_ok=True) - - if seeds is None: - ctx.fail('--seeds option is required.') - - # Generate images. - target_z = np.array([]) - target_w = np.array([]) - latent_out = outdir.replace('/images/', '') - for seed_idx, seed in enumerate(seeds): - if seed % 5000 == 0: - print('Generating image for seed %d (%d/%d) ...' 
% - (seed, seed_idx, len(seeds))) - - if version == 1: # stylegan v1 - z = np.random.RandomState(seed).randn(1, Gs.input_shape[1]) - # Generate image. - fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) - if noise_mode == 'const': - randomize_noise = False - else: - randomize_noise = True - images = Gs.run(z, None, truncation_psi=truncation_psi, - randomize_noise=randomize_noise, output_transform=fmt) - PIL.Image.fromarray(images[0], 'RGB').save( - f'{outdir}/seed{seed:04d}.png') - - else: # stylegan v2/v3 - label = torch.zeros([1, G.c_dim], device=device) - z = torch.from_numpy(np.random.RandomState( - seed).randn(1, G.z_dim)).to(device) - if target_z.size == 0: - target_z = z.cpu() - else: - target_z = np.append(target_z, z.cpu(), axis=0) - - w = G.mapping(z, label, truncation_psi=truncation_psi) - img = G.synthesis(w, noise_mode=noise_mode, force_fp32=True) - if target_w.size == 0: - target_w = w.cpu() - else: - target_w = np.append(target_w, w.cpu(), axis=0) - - img = (img.permute(0, 2, 3, 1) * 127.5 + - 128).clamp(0, 255).to(torch.uint8) - PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save( - f'{outdir}/seed{seed:04d}.png') - # print(target_z) - # print(target_z.shape,target_w.shape) - - -# ---------------------------------------------------------------------------- - -if __name__ == "__main__": - generate_images() - -# ---------------------------------------------------------------------------- diff --git a/spaces/Andres99/Tune-A-Video-Training-UI/uploader.py b/spaces/Andres99/Tune-A-Video-Training-UI/uploader.py deleted file mode 100644 index d9e06ec02127db34016d3d7b550e88f820a737fe..0000000000000000000000000000000000000000 --- a/spaces/Andres99/Tune-A-Video-Training-UI/uploader.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import annotations - -from huggingface_hub import HfApi - - -class Uploader: - def __init__(self, hf_token: str | None): - self.hf_token = hf_token - - def upload(self, - folder_path: str, - repo_name: str, - organization: str = '', - repo_type: str = 'model', - private: bool = True, - delete_existing_repo: bool = False, - input_token: str | None = None) -> str: - - api = HfApi(token=self.hf_token if self.hf_token else input_token) - - if not folder_path: - raise ValueError - if not repo_name: - raise ValueError - if not organization: - organization = api.whoami()['name'] - - repo_id = f'{organization}/{repo_name}' - if delete_existing_repo: - try: - api.delete_repo(repo_id, repo_type=repo_type) - except Exception: - pass - try: - api.create_repo(repo_id, repo_type=repo_type, private=private) - api.upload_folder(repo_id=repo_id, - folder_path=folder_path, - path_in_repo='.', - repo_type=repo_type) - url = f'https://huggingface.co/{repo_id}' - message = f'Your model was successfully uploaded to {url}.' 
- except Exception as e: - message = str(e) - return message diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md deleted file mode 100644 index 8486641da2c40ac9f68e1a4e50b5adbb0e96c4fa..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md +++ /dev/null @@ -1,427 +0,0 @@ - - -# Stable diffusion XL - -Stable Diffusion XL was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://arxiv.org/abs/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, Robin Rombach - -The abstract of the paper is the following: - -*We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL leverages a three times larger UNet backbone: The increase of model parameters is mainly due to more attention blocks and a larger cross-attention context as SDXL uses a second text encoder. We design multiple novel conditioning schemes and train SDXL on multiple aspect ratios. We also introduce a refinement model which is used to improve the visual fidelity of samples generated by SDXL using a post-hoc image-to-image technique. We demonstrate that SDXL shows drastically improved performance compared the previous versions of Stable Diffusion and achieves results competitive with those of black-box state-of-the-art image generators.* - -## Tips - -- Stable Diffusion XL works especially well with images between 768 and 1024. -- Stable Diffusion XL can pass a different prompt for each of the text encoders it was trained on as shown below. We can even pass different parts of the same prompt to the text encoders. -- Stable Diffusion XL output image can be improved by making use of a refiner as shown below. - -### Available checkpoints: - -- *Text-to-Image (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with [`StableDiffusionXLPipeline`] -- *Image-to-Image / Refiner (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) with [`StableDiffusionXLImg2ImgPipeline`] - -## Usage Example - -Before using SDXL make sure to have `transformers`, `accelerate`, `safetensors` and `invisible_watermark` installed. -You can install the libraries as follows: - -``` -pip install transformers -pip install accelerate -pip install safetensors -``` - -### Watermarker - -We recommend to add an invisible watermark to images generating by Stable Diffusion XL, this can help with identifying if an image is machine-synthesised for downstream applications. To do so, please install -the [invisible-watermark library](https://pypi.org/project/invisible-watermark/) via: - -``` -pip install invisible-watermark>=0.2.0 -``` - -If the `invisible-watermark` library is installed the watermarker will be used **by default**. 
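As a quick sanity check, a minimal sketch along the following lines can confirm that the dependency is importable before loading the pipeline. It assumes the `invisible-watermark` package exposes the import name `imwatermark`; if that assumption does not hold for your install, adjust the module name accordingly.

```py
import importlib.util

# Assumption: the `invisible-watermark` package is imported as `imwatermark`.
# If the module is missing, SDXL outputs will not be watermarked by default.
if importlib.util.find_spec("imwatermark") is None:
    print("invisible-watermark not found; install it to enable the default watermarker.")
else:
    print("invisible-watermark available; SDXL outputs will be watermarked by default.")
```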
- -If you have other provisions for generating or deploying images safely, you can disable the watermarker as follows: - -```py -pipe = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=False) -``` - -### Text-to-Image - -You can use SDXL as follows for *text-to-image*: - -```py -from diffusers import StableDiffusionXLPipeline -import torch - -pipe = StableDiffusionXLPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -pipe.to("cuda") - -prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" -image = pipe(prompt=prompt).images[0] -``` - -### Image-to-image - -You can use SDXL as follows for *image-to-image*: - -```py -import torch -from diffusers import StableDiffusionXLImg2ImgPipeline -from diffusers.utils import load_image - -pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -pipe = pipe.to("cuda") -url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" - -init_image = load_image(url).convert("RGB") -prompt = "a photo of an astronaut riding a horse on mars" -image = pipe(prompt, image=init_image).images[0] -``` - -### Inpainting - -You can use SDXL as follows for *inpainting* - -```py -import torch -from diffusers import StableDiffusionXLInpaintPipeline -from diffusers.utils import load_image - -pipe = StableDiffusionXLInpaintPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -pipe.to("cuda") - -img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" -mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" - -init_image = load_image(img_url).convert("RGB") -mask_image = load_image(mask_url).convert("RGB") - -prompt = "A majestic tiger sitting on a bench" -image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0] -``` - -### Refining the image output - -In addition to the [base model checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), -StableDiffusion-XL also includes a [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) -that is specialized in denoising low-noise stage images to generate images of improved high-frequency quality. -This refiner checkpoint can be used as a "second-step" pipeline after having run the base checkpoint to improve -image quality. - -When using the refiner, one can easily -- 1.) employ the base model and refiner as an *Ensemble of Expert Denoisers* as first proposed in [eDiff-I](https://research.nvidia.com/labs/dir/eDiff-I/) or -- 2.) simply run the refiner in [SDEdit](https://arxiv.org/abs/2108.01073) fashion after the base model. - -**Note**: The idea of using SD-XL base & refiner as an ensemble of experts was first brought forward by -a couple community contributors which also helped shape the following `diffusers` implementation, namely: -- [SytanSD](https://github.com/SytanSD) -- [bghira](https://github.com/bghira) -- [Birch-san](https://github.com/Birch-san) -- [AmericanPresidentJimmyCarter](https://github.com/AmericanPresidentJimmyCarter) - -#### 1.) 
Ensemble of Expert Denoisers - -When using the base and refiner model as an ensemble of expert of denoisers, the base model should serve as the -expert for the high-noise diffusion stage and the refiner serves as the expert for the low-noise diffusion stage. - -The advantage of 1.) over 2.) is that it requires less overall denoising steps and therefore should be significantly -faster. The drawback is that one cannot really inspect the output of the base model; it will still be heavily denoised. - -To use the base model and refiner as an ensemble of expert denoisers, make sure to define the span -of timesteps which should be run through the high-noise denoising stage (*i.e.* the base model) and the low-noise -denoising stage (*i.e.* the refiner model) respectively. We can set the intervals using the [`denoising_end`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end) of the base model -and [`denoising_start`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start) of the refiner model. - -For both `denoising_end` and `denoising_start` a float value between 0 and 1 should be passed. -When passed, the end and start of denoising will be defined by proportions of discrete timesteps as -defined by the model schedule. -Note that this will override `strength` if it is also declared, since the number of denoising steps -is determined by the discrete timesteps the model was trained on and the declared fractional cutoff. - -Let's look at an example. -First, we import the two pipelines. Since the text encoders and variational autoencoder are the same -you don't have to load those again for the refiner. - -```py -from diffusers import DiffusionPipeline -import torch - -base = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -base.to("cuda") - -refiner = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-refiner-1.0", - text_encoder_2=base.text_encoder_2, - vae=base.vae, - torch_dtype=torch.float16, - use_safetensors=True, - variant="fp16", -) -refiner.to("cuda") -``` - -Now we define the number of inference steps and the point at which the model shall be run through the -high-noise denoising stage (*i.e.* the base model). - -```py -n_steps = 40 -high_noise_frac = 0.8 -``` - -Stable Diffusion XL base is trained on timesteps 0-999 and Stable Diffusion XL refiner is finetuned -from the base model on low noise timesteps 0-199 inclusive, so we use the base model for the first -800 timesteps (high noise) and the refiner for the last 200 timesteps (low noise). Hence, `high_noise_frac` -is set to 0.8, so that all steps 200-999 (the first 80% of denoising timesteps) are performed by the -base model and steps 0-199 (the last 20% of denoising timesteps) are performed by the refiner model. - -Remember, the denoising process starts at **high value** (high noise) timesteps and ends at -**low value** (low noise) timesteps. - -Let's run the two pipelines now. Make sure to set `denoising_end` and -`denoising_start` to the same values and keep `num_inference_steps` constant. 
Also remember that -the output of the base model should be in latent space: - -```py -prompt = "A majestic lion jumping from a big stone at night" - -image = base( - prompt=prompt, - num_inference_steps=n_steps, - denoising_end=high_noise_frac, - output_type="latent", -).images -image = refiner( - prompt=prompt, - num_inference_steps=n_steps, - denoising_start=high_noise_frac, - image=image, -).images[0] -``` - -Let's have a look at the images - -| Original Image | Ensemble of Denoisers Experts | -|---|---| -| ![lion_base_timesteps](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_base.png) | ![lion_refined_timesteps](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_refined.png) - -If we would have just run the base model on the same 40 steps, the image would have been arguably less detailed (e.g. the lion eyes and nose): - - - -The ensemble-of-experts method works well on all available schedulers! - - - -#### 2.) Refining the image output from fully denoised base image - -In standard [`StableDiffusionImg2ImgPipeline`]-fashion, the fully-denoised image generated of the base model -can be further improved using the [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0). - -For this, you simply run the refiner as a normal image-to-image pipeline after the "base" text-to-image -pipeline. You can leave the outputs of the base model in latent space. - -```py -from diffusers import DiffusionPipeline -import torch - -pipe = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -pipe.to("cuda") - -refiner = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-refiner-1.0", - text_encoder_2=pipe.text_encoder_2, - vae=pipe.vae, - torch_dtype=torch.float16, - use_safetensors=True, - variant="fp16", -) -refiner.to("cuda") - -prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" - -image = pipe(prompt=prompt, output_type="latent" if use_refiner else "pil").images[0] -image = refiner(prompt=prompt, image=image[None, :]).images[0] -``` - -| Original Image | Refined Image | -|---|---| -| ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/init_image.png) | ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/refined_image.png) | - - - -The refiner can also very well be used in an in-painting setting. 
To do so just make - sure you use the [`StableDiffusionXLInpaintPipeline`] classes as shown below - - - -To use the refiner for inpainting in the Ensemble of Expert Denoisers setting you can do the following: - -```py -from diffusers import StableDiffusionXLInpaintPipeline -from diffusers.utils import load_image - -pipe = StableDiffusionXLInpaintPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -pipe.to("cuda") - -refiner = StableDiffusionXLInpaintPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-refiner-1.0", - text_encoder_2=pipe.text_encoder_2, - vae=pipe.vae, - torch_dtype=torch.float16, - use_safetensors=True, - variant="fp16", -) -refiner.to("cuda") - -img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" -mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" - -init_image = load_image(img_url).convert("RGB") -mask_image = load_image(mask_url).convert("RGB") - -prompt = "A majestic tiger sitting on a bench" -num_inference_steps = 75 -high_noise_frac = 0.7 - -image = pipe( - prompt=prompt, - image=init_image, - mask_image=mask_image, - num_inference_steps=num_inference_steps, - denoising_start=high_noise_frac, - output_type="latent", -).images -image = refiner( - prompt=prompt, - image=image, - mask_image=mask_image, - num_inference_steps=num_inference_steps, - denoising_start=high_noise_frac, -).images[0] -``` - -To use the refiner for inpainting in the standard SDE-style setting, simply remove `denoising_end` and `denoising_start` and choose a smaller -number of inference steps for the refiner. - -### Loading single file checkpoints / original file format - -By making use of [`~diffusers.loaders.FromSingleFileMixin.from_single_file`] you can also load the -original file format into `diffusers`: - -```py -from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline -import torch - -pipe = StableDiffusionXLPipeline.from_single_file( - "./sd_xl_base_1.0.safetensors", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -pipe.to("cuda") - -refiner = StableDiffusionXLImg2ImgPipeline.from_single_file( - "./sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16, use_safetensors=True, variant="fp16" -) -refiner.to("cuda") -``` - -### Memory optimization via model offloading - -If you are seeing out-of-memory errors, we recommend making use of [`StableDiffusionXLPipeline.enable_model_cpu_offload`]. - -```diff -- pipe.to("cuda") -+ pipe.enable_model_cpu_offload() -``` - -and - -```diff -- refiner.to("cuda") -+ refiner.enable_model_cpu_offload() -``` - -### Speed-up inference with `torch.compile` - -You can speed up inference by making use of `torch.compile`. This should give you **ca.** 20% speed-up. 
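As a concrete sketch, and assuming PyTorch 2.0+ together with the `pipe` and `refiner` objects loaded in the earlier examples, the compilation step could be applied as shown here; the equivalent minimal diff is given right after.

```py
import torch

# Assumes `pipe` (SDXL base) and `refiner` were loaded as in the examples above
# and that PyTorch >= 2.0 is installed.
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)

# The first generation triggers compilation and is noticeably slower;
# subsequent generations reuse the compiled UNet.
image = pipe(prompt="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k").images[0]
```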
- -```diff -+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) -+ refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True) -``` - -### Running with `torch < 2.0` - -**Note** that if you want to run Stable Diffusion XL with `torch` < 2.0, please make sure to enable xformers -attention: - -``` -pip install xformers -``` - -```diff -+pipe.enable_xformers_memory_efficient_attention() -+refiner.enable_xformers_memory_efficient_attention() -``` - -## StableDiffusionXLPipeline - -[[autodoc]] StableDiffusionXLPipeline - - all - - __call__ - -## StableDiffusionXLImg2ImgPipeline - -[[autodoc]] StableDiffusionXLImg2ImgPipeline - - all - - __call__ - -## StableDiffusionXLInpaintPipeline - -[[autodoc]] StableDiffusionXLInpaintPipeline - - all - - __call__ - -### Passing different prompts to each text-encoder - -Stable Diffusion XL was trained on two text encoders. The default behavior is to pass the same prompt to each. But it is possible to pass a different prompt for each text-encoder, as [some users](https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201) noted that it can boost quality. -To do so, you can pass `prompt_2` and `negative_prompt_2` in addition to `prompt` and `negative_prompt`. By doing that, you will pass the original prompts and negative prompts (as in `prompt` and `negative_prompt`) to `text_encoder` (in official SDXL 0.9/1.0 that is [OpenAI CLIP-ViT/L-14](https://huggingface.co/openai/clip-vit-large-patch14)), -and `prompt_2` and `negative_prompt_2` to `text_encoder_2` (in official SDXL 0.9/1.0 that is [OpenCLIP-ViT/bigG-14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). - -```py -from diffusers import StableDiffusionXLPipeline -import torch - -pipe = StableDiffusionXLPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True -) -pipe.to("cuda") - -# prompt will be passed to OAI CLIP-ViT/L-14 -prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" -# prompt_2 will be passed to OpenCLIP-ViT/bigG-14 -prompt_2 = "monet painting" -image = pipe(prompt=prompt, prompt_2=prompt_2).images[0] -``` diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py deleted file mode 100644 index 432619a79ddd32d288893e3021a14ab6893b370a..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py +++ /dev/null @@ -1,82 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import unittest - -from diffusers import FlaxStableDiffusionInpaintPipeline -from diffusers.utils import is_flax_available, load_image, slow -from diffusers.utils.testing_utils import require_flax - - -if is_flax_available(): - import jax - import jax.numpy as jnp - from flax.jax_utils import replicate - from flax.training.common_utils import shard - - -@slow -@require_flax -class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - - def test_stable_diffusion_inpaint_pipeline(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/sd2-inpaint/init_image.png" - ) - mask_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" - ) - - model_id = "xvjiarui/stable-diffusion-2-inpainting" - pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) - - prompt = "Face of a yellow cat, high resolution, sitting on a park bench" - - prng_seed = jax.random.PRNGKey(0) - num_inference_steps = 50 - - num_samples = jax.device_count() - prompt = num_samples * [prompt] - init_image = num_samples * [init_image] - mask_image = num_samples * [mask_image] - prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image) - - # shard inputs and rng - params = replicate(params) - prng_seed = jax.random.split(prng_seed, jax.device_count()) - prompt_ids = shard(prompt_ids) - processed_masked_images = shard(processed_masked_images) - processed_masks = shard(processed_masks) - - output = pipeline( - prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True - ) - - images = output.images.reshape(num_samples, 512, 512, 3) - - image_slice = images[0, 253:256, 253:256, -1] - - output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) - expected_slice = jnp.array( - [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] - ) - print(f"output_slice: {output_slice}") - assert jnp.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py deleted file mode 100644 index 5838ff3eefb03bc83928fa13848cea9ff8647827..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, - YOLOAnchorGenerator) -from .builder import ANCHOR_GENERATORS, build_anchor_generator -from .point_generator import PointGenerator -from .utils import anchor_inside_flags, calc_region, images_to_levels - -__all__ = [ - 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', - 'PointGenerator', 'images_to_levels', 'calc_region', - 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator' -] diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 00b2594ba8a1c9edc90cca7a6d7c3334fa209edc..0000000000000000000000000000000000000000 --- 
a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index 796ba3fb142394c4d93a29ba57548dca59d8d02b..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 398d9759cafc1d01e78c138abd249808531a97b9..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' -model = dict( - pretrained='torchvision://resnet101', - backbone=dict(type='ResNet', depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py deleted file mode 100644 index 1a3c43495bbf9d302216d7ddf62df75446907a36..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x512_20k_voc12aug.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/AnimalEquality/chatbot/_proc/styles.css b/spaces/AnimalEquality/chatbot/_proc/styles.css deleted file mode 100644 index 66ccc49ee8f0e73901dac02dc4e9224b7d1b2c78..0000000000000000000000000000000000000000 --- a/spaces/AnimalEquality/chatbot/_proc/styles.css +++ /dev/null @@ -1,37 +0,0 @@ -.cell { - margin-bottom: 1rem; -} - -.cell > .sourceCode { - margin-bottom: 0; -} - -.cell-output > pre { - margin-bottom: 0; -} - -.cell-output > pre, .cell-output > .sourceCode > pre, .cell-output-stdout > pre { - margin-left: 0.8rem; - margin-top: 0; - background: none; - border-left: 2px solid lightsalmon; - border-top-left-radius: 0; - border-top-right-radius: 0; -} - -.cell-output > .sourceCode { - border: none; -} - -.cell-output > .sourceCode { - background: none; - margin-top: 0; -} - -div.description { - padding-left: 2px; - padding-top: 5px; - font-style: italic; - font-size: 135%; - opacity: 70%; -} diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py deleted file mode 100644 index 0f650c1ab1a0f66bf79ce72d052db43b96801b6d..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py +++ 
/dev/null @@ -1,27 +0,0 @@ -from typing import Optional - -from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline - -available_pipelines = ['llava-7b', 'llava-13b'] - - -def get_pipeline(name: str, params: dict) -> Optional[AbstractMultimodalPipeline]: - if name == 'llava-7b': - from .llava import LLaVA_v0_7B_Pipeline - return LLaVA_v0_7B_Pipeline(params) - if name == 'llava-13b': - from .llava import LLaVA_v0_13B_Pipeline - return LLaVA_v0_13B_Pipeline(params) - return None - - -def get_pipeline_from_model_name(model_name: str, params: dict) -> Optional[AbstractMultimodalPipeline]: - if 'llava' not in model_name.lower(): - return None - if '7b' in model_name.lower(): - from .llava import LLaVA_v0_7B_Pipeline - return LLaVA_v0_7B_Pipeline(params) - if '13b' in model_name.lower(): - from .llava import LLaVA_v0_13B_Pipeline - return LLaVA_v0_13B_Pipeline(params) - return None diff --git a/spaces/Ank0X0/Image-Upscaling-Playground/app.py b/spaces/Ank0X0/Image-Upscaling-Playground/app.py deleted file mode 100644 index 1f3736667bfd4e5ac6d9ee2ef9b95416cb80f9c0..0000000000000000000000000000000000000000 --- a/spaces/Ank0X0/Image-Upscaling-Playground/app.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import cv2 -import onnxruntime -import gradio as gr - - -def pre_process(img: np.array) -> np.array: - # H, W, C -> C, H, W - img = np.transpose(img[:, :, 0:3], (2, 0, 1)) - # C, H, W -> 1, C, H, W - img = np.expand_dims(img, axis=0).astype(np.float32) - return img - - -def post_process(img: np.array) -> np.array: - # 1, C, H, W -> C, H, W - img = np.squeeze(img) - # C, H, W -> H, W, C - img = np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8) - return img - - -def inference(model_path: str, img_array: np.array) -> np.array: - options = onnxruntime.SessionOptions() - options.intra_op_num_threads = 1 - options.inter_op_num_threads = 1 - ort_session = onnxruntime.InferenceSession(model_path, options) - ort_inputs = {ort_session.get_inputs()[0].name: img_array} - ort_outs = ort_session.run(None, ort_inputs) - - return ort_outs[0] - - -def convert_pil_to_cv2(image): - # pil_image = image.convert("RGB") - open_cv_image = np.array(image) - # RGB to BGR - open_cv_image = open_cv_image[:, :, ::-1].copy() - return open_cv_image - - -def upscale(image, model): - model_path = f"models/{model}.ort" - img = convert_pil_to_cv2(image) - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - - if img.shape[2] == 4: - alpha = img[:, :, 3] # GRAY - alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR) # BGR - alpha_output = post_process(inference(model_path, pre_process(alpha))) # BGR - alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY) # GRAY - - img = img[:, :, 0:3] # BGR - image_output = post_process(inference(model_path, pre_process(img))) # BGR - image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA) # BGRA - image_output[:, :, 3] = alpha_output - - elif img.shape[2] == 3: - image_output = post_process(inference(model_path, pre_process(img))) # BGR - - return image_output - - -css = ".output-image, .input-image, .image-preview {height: 480px !important} " -model_choices = ["modelx2", "modelx2 25 JXL", "modelx4", "minecraft_modelx4"] - -gr.Interface( - fn=upscale, - inputs=[ - gr.inputs.Image(type="pil", label="Input Image"), - gr.inputs.Radio( - model_choices, - type="value", - default=None, - label="Choose Upscaler", - optional=False, - ), - ], - outputs="image", - title="Image Upscaling 🦆", - description="Model: [Anchor-based Plain Net for Mobile Image 
Super-Resolution](https://arxiv.org/abs/2105.09750). Repository: [SR Mobile PyTorch](https://github.com/w11wo/sr_mobile_pytorch)", - allow_flagging="never", - css=css, -).launch() diff --git a/spaces/Annelisseishere/Streamlit_GPT/README.md b/spaces/Annelisseishere/Streamlit_GPT/README.md deleted file mode 100644 index d690b01b20e2640c17f802059f3306685323045c..0000000000000000000000000000000000000000 --- a/spaces/Annelisseishere/Streamlit_GPT/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Streamlit GPT -emoji: 🏢 -colorFrom: indigo -colorTo: blue -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Asahi402/anime-remove-background/README.md b/spaces/Asahi402/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/Asahi402/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py deleted file mode 100644 index 3f10701f6b28c72b62c9904fec37b96bdd199dcc..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py +++ /dev/null @@ -1,189 +0,0 @@ -import logging -from optparse import Values -from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import ERROR, SUCCESS -from pip._internal.metadata import BaseDistribution, get_default_environment -from pip._internal.utils.misc import write_output - -logger = logging.getLogger(__name__) - - -class ShowCommand(Command): - """ - Show information about one or more installed packages. - - The output is in RFC-compliant mail header format. 
- """ - - usage = """ - %prog [options] ...""" - ignore_require_venv = True - - def add_options(self) -> None: - self.cmd_opts.add_option( - "-f", - "--files", - dest="files", - action="store_true", - default=False, - help="Show the full list of installed files for each package.", - ) - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options: Values, args: List[str]) -> int: - if not args: - logger.warning("ERROR: Please provide a package name or names.") - return ERROR - query = args - - results = search_packages_info(query) - if not print_results( - results, list_files=options.files, verbose=options.verbose - ): - return ERROR - return SUCCESS - - -class _PackageInfo(NamedTuple): - name: str - version: str - location: str - editable_project_location: Optional[str] - requires: List[str] - required_by: List[str] - installer: str - metadata_version: str - classifiers: List[str] - summary: str - homepage: str - project_urls: List[str] - author: str - author_email: str - license: str - entry_points: List[str] - files: Optional[List[str]] - - -def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]: - """ - Gather details from installed distributions. Print distribution name, - version, location, and installed files. Installed files requires a - pip generated 'installed-files.txt' in the distributions '.egg-info' - directory. - """ - env = get_default_environment() - - installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()} - query_names = [canonicalize_name(name) for name in query] - missing = sorted( - [name for name, pkg in zip(query, query_names) if pkg not in installed] - ) - if missing: - logger.warning("Package(s) not found: %s", ", ".join(missing)) - - def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]: - return ( - dist.metadata["Name"] or "UNKNOWN" - for dist in installed.values() - if current_dist.canonical_name - in {canonicalize_name(d.name) for d in dist.iter_dependencies()} - ) - - for query_name in query_names: - try: - dist = installed[query_name] - except KeyError: - continue - - requires = sorted((req.name for req in dist.iter_dependencies()), key=str.lower) - required_by = sorted(_get_requiring_packages(dist), key=str.lower) - - try: - entry_points_text = dist.read_text("entry_points.txt") - entry_points = entry_points_text.splitlines(keepends=False) - except FileNotFoundError: - entry_points = [] - - files_iter = dist.iter_declared_entries() - if files_iter is None: - files: Optional[List[str]] = None - else: - files = sorted(files_iter) - - metadata = dist.metadata - - yield _PackageInfo( - name=dist.raw_name, - version=str(dist.version), - location=dist.location or "", - editable_project_location=dist.editable_project_location, - requires=requires, - required_by=required_by, - installer=dist.installer, - metadata_version=dist.metadata_version or "", - classifiers=metadata.get_all("Classifier", []), - summary=metadata.get("Summary", ""), - homepage=metadata.get("Home-page", ""), - project_urls=metadata.get_all("Project-URL", []), - author=metadata.get("Author", ""), - author_email=metadata.get("Author-email", ""), - license=metadata.get("License", ""), - entry_points=entry_points, - files=files, - ) - - -def print_results( - distributions: Iterable[_PackageInfo], - list_files: bool, - verbose: bool, -) -> bool: - """ - Print the information from installed distributions found. 
- """ - results_printed = False - for i, dist in enumerate(distributions): - results_printed = True - if i > 0: - write_output("---") - - write_output("Name: %s", dist.name) - write_output("Version: %s", dist.version) - write_output("Summary: %s", dist.summary) - write_output("Home-page: %s", dist.homepage) - write_output("Author: %s", dist.author) - write_output("Author-email: %s", dist.author_email) - write_output("License: %s", dist.license) - write_output("Location: %s", dist.location) - if dist.editable_project_location is not None: - write_output( - "Editable project location: %s", dist.editable_project_location - ) - write_output("Requires: %s", ", ".join(dist.requires)) - write_output("Required-by: %s", ", ".join(dist.required_by)) - - if verbose: - write_output("Metadata-Version: %s", dist.metadata_version) - write_output("Installer: %s", dist.installer) - write_output("Classifiers:") - for classifier in dist.classifiers: - write_output(" %s", classifier) - write_output("Entry-points:") - for entry in dist.entry_points: - write_output(" %s", entry.strip()) - write_output("Project-URLs:") - for project_url in dist.project_urls: - write_output(" %s", project_url) - if list_files: - write_output("Files:") - if dist.files is None: - write_output("Cannot locate RECORD or installed-files.txt") - else: - for line in dist.files: - write_output(" %s", line.strip()) - return results_printed diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py deleted file mode 100644 index 98fc2a7f487da55a23b962793158911848800211..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py +++ /dev/null @@ -1,600 +0,0 @@ -"""Automatic discovery of Python modules and packages (for inclusion in the -distribution) and other config values. - -For the purposes of this module, the following nomenclature is used: - -- "src-layout": a directory representing a Python project that contains a "src" - folder. Everything under the "src" folder is meant to be included in the - distribution when packaging the project. Example:: - - . - ├── tox.ini - ├── pyproject.toml - └── src/ - └── mypkg/ - ├── __init__.py - ├── mymodule.py - └── my_data_file.txt - -- "flat-layout": a Python project that does not use "src-layout" but instead - have a directory under the project root for each package:: - - . - ├── tox.ini - ├── pyproject.toml - └── mypkg/ - ├── __init__.py - ├── mymodule.py - └── my_data_file.txt - -- "single-module": a project that contains a single Python script direct under - the project root (no directory used):: - - . 
- ├── tox.ini - ├── pyproject.toml - └── mymodule.py - -""" - -import itertools -import os -from fnmatch import fnmatchcase -from glob import glob -from pathlib import Path -from typing import ( - TYPE_CHECKING, - Callable, - Dict, - Iterable, - Iterator, - List, - Mapping, - Optional, - Tuple, - Union -) - -import _distutils_hack.override # noqa: F401 - -from distutils import log -from distutils.util import convert_path - -_Path = Union[str, os.PathLike] -_Filter = Callable[[str], bool] -StrIter = Iterator[str] - -chain_iter = itertools.chain.from_iterable - -if TYPE_CHECKING: - from setuptools import Distribution # noqa - - -def _valid_name(path: _Path) -> bool: - # Ignore invalid names that cannot be imported directly - return os.path.basename(path).isidentifier() - - -class _Finder: - """Base class that exposes functionality for module/package finders""" - - ALWAYS_EXCLUDE: Tuple[str, ...] = () - DEFAULT_EXCLUDE: Tuple[str, ...] = () - - @classmethod - def find( - cls, - where: _Path = '.', - exclude: Iterable[str] = (), - include: Iterable[str] = ('*',) - ) -> List[str]: - """Return a list of all Python items (packages or modules, depending on - the finder implementation) found within directory 'where'. - - 'where' is the root directory which will be searched. - It should be supplied as a "cross-platform" (i.e. URL-style) path; - it will be converted to the appropriate local path syntax. - - 'exclude' is a sequence of names to exclude; '*' can be used - as a wildcard in the names. - When finding packages, 'foo.*' will exclude all subpackages of 'foo' - (but not 'foo' itself). - - 'include' is a sequence of names to include. - If it's specified, only the named items will be included. - If it's not specified, all found items will be included. - 'include' can contain shell style wildcard patterns just like - 'exclude'. - """ - - exclude = exclude or cls.DEFAULT_EXCLUDE - return list( - cls._find_iter( - convert_path(str(where)), - cls._build_filter(*cls.ALWAYS_EXCLUDE, *exclude), - cls._build_filter(*include), - ) - ) - - @classmethod - def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter: - raise NotImplementedError - - @staticmethod - def _build_filter(*patterns: str) -> _Filter: - """ - Given a list of patterns, return a callable that will be true only if - the input matches at least one of the patterns. - """ - return lambda name: any(fnmatchcase(name, pat) for pat in patterns) - - -class PackageFinder(_Finder): - """ - Generate a list of all Python packages found within a directory - """ - - ALWAYS_EXCLUDE = ("ez_setup", "*__pycache__") - - @classmethod - def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter: - """ - All the packages found in 'where' that pass the 'include' filter, but - not the 'exclude' filter. - """ - for root, dirs, files in os.walk(str(where), followlinks=True): - # Copy dirs to iterate over it, then empty dirs. - all_dirs = dirs[:] - dirs[:] = [] - - for dir in all_dirs: - full_path = os.path.join(root, dir) - rel_path = os.path.relpath(full_path, where) - package = rel_path.replace(os.path.sep, '.') - - # Skip directory trees that are not valid packages - if '.' in dir or not cls._looks_like_package(full_path, package): - continue - - # Should this package be included? - if include(package) and not exclude(package): - yield package - - # Keep searching subdirectories, as there may be more packages - # down there, even if the parent was excluded. 
- dirs.append(dir) - - @staticmethod - def _looks_like_package(path: _Path, _package_name: str) -> bool: - """Does a directory look like a package?""" - return os.path.isfile(os.path.join(path, '__init__.py')) - - -class PEP420PackageFinder(PackageFinder): - @staticmethod - def _looks_like_package(_path: _Path, _package_name: str) -> bool: - return True - - -class ModuleFinder(_Finder): - """Find isolated Python modules. - This function will **not** recurse subdirectories. - """ - - @classmethod - def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter: - for file in glob(os.path.join(where, "*.py")): - module, _ext = os.path.splitext(os.path.basename(file)) - - if not cls._looks_like_module(module): - continue - - if include(module) and not exclude(module): - yield module - - _looks_like_module = staticmethod(_valid_name) - - -# We have to be extra careful in the case of flat layout to not include files -# and directories not meant for distribution (e.g. tool-related) - - -class FlatLayoutPackageFinder(PEP420PackageFinder): - _EXCLUDE = ( - "ci", - "bin", - "doc", - "docs", - "documentation", - "manpages", - "news", - "changelog", - "test", - "tests", - "unit_test", - "unit_tests", - "example", - "examples", - "scripts", - "tools", - "util", - "utils", - "python", - "build", - "dist", - "venv", - "env", - "requirements", - # ---- Task runners / Build tools ---- - "tasks", # invoke - "fabfile", # fabric - "site_scons", # SCons - # ---- Other tools ---- - "benchmark", - "benchmarks", - "exercise", - "exercises", - # ---- Hidden directories/Private packages ---- - "[._]*", - ) - - DEFAULT_EXCLUDE = tuple(chain_iter((p, f"{p}.*") for p in _EXCLUDE)) - """Reserved package names""" - - @staticmethod - def _looks_like_package(_path: _Path, package_name: str) -> bool: - names = package_name.split('.') - # Consider PEP 561 - root_pkg_is_valid = names[0].isidentifier() or names[0].endswith("-stubs") - return root_pkg_is_valid and all(name.isidentifier() for name in names[1:]) - - -class FlatLayoutModuleFinder(ModuleFinder): - DEFAULT_EXCLUDE = ( - "setup", - "conftest", - "test", - "tests", - "example", - "examples", - "build", - # ---- Task runners ---- - "toxfile", - "noxfile", - "pavement", - "dodo", - "tasks", - "fabfile", - # ---- Other tools ---- - "[Ss][Cc]onstruct", # SCons - "conanfile", # Connan: C/C++ build tool - "manage", # Django - "benchmark", - "benchmarks", - "exercise", - "exercises", - # ---- Hidden files/Private modules ---- - "[._]*", - ) - """Reserved top-level module names""" - - -def _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]: - nested = PEP420PackageFinder.find(pkg_dir) - return [root_pkg] + [".".join((root_pkg, n)) for n in nested] - - -class ConfigDiscovery: - """Fill-in metadata and options that can be automatically derived - (from other metadata/options, the file system or conventions) - """ - - def __init__(self, distribution: "Distribution"): - self.dist = distribution - self._called = False - self._disabled = False - self._skip_ext_modules = False - - def _disable(self): - """Internal API to disable automatic discovery""" - self._disabled = True - - def _ignore_ext_modules(self): - """Internal API to disregard ext_modules. - - Normally auto-discovery would not be triggered if ``ext_modules`` are set - (this is done for backward compatibility with existing packages relying on - ``setup.py`` or ``setup.cfg``). 
However, ``setuptools`` can call this function - to ignore given ``ext_modules`` and proceed with the auto-discovery if - ``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml - metadata). - """ - self._skip_ext_modules = True - - @property - def _root_dir(self) -> _Path: - # The best is to wait until `src_root` is set in dist, before using _root_dir. - return self.dist.src_root or os.curdir - - @property - def _package_dir(self) -> Dict[str, str]: - if self.dist.package_dir is None: - return {} - return self.dist.package_dir - - def __call__(self, force=False, name=True, ignore_ext_modules=False): - """Automatically discover missing configuration fields - and modifies the given ``distribution`` object in-place. - - Note that by default this will only have an effect the first time the - ``ConfigDiscovery`` object is called. - - To repeatedly invoke automatic discovery (e.g. when the project - directory changes), please use ``force=True`` (or create a new - ``ConfigDiscovery`` instance). - """ - if force is False and (self._called or self._disabled): - # Avoid overhead of multiple calls - return - - self._analyse_package_layout(ignore_ext_modules) - if name: - self.analyse_name() # depends on ``packages`` and ``py_modules`` - - self._called = True - - def _explicitly_specified(self, ignore_ext_modules: bool) -> bool: - """``True`` if the user has specified some form of package/module listing""" - ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules - ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules) - return ( - self.dist.packages is not None - or self.dist.py_modules is not None - or ext_modules - or hasattr(self.dist, "configuration") and self.dist.configuration - # ^ Some projects use numpy.distutils.misc_util.Configuration - ) - - def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool: - if self._explicitly_specified(ignore_ext_modules): - # For backward compatibility, just try to find modules/packages - # when nothing is given - return True - - log.debug( - "No `packages` or `py_modules` configuration, performing " - "automatic discovery." - ) - - return ( - self._analyse_explicit_layout() - or self._analyse_src_layout() - # flat-layout is the trickiest for discovery so it should be last - or self._analyse_flat_layout() - ) - - def _analyse_explicit_layout(self) -> bool: - """The user can explicitly give a package layout via ``package_dir``""" - package_dir = self._package_dir.copy() # don't modify directly - package_dir.pop("", None) # This falls under the "src-layout" umbrella - root_dir = self._root_dir - - if not package_dir: - return False - - log.debug(f"`explicit-layout` detected -- analysing {package_dir}") - pkgs = chain_iter( - _find_packages_within(pkg, os.path.join(root_dir, parent_dir)) - for pkg, parent_dir in package_dir.items() - ) - self.dist.packages = list(pkgs) - log.debug(f"discovered packages -- {self.dist.packages}") - return True - - def _analyse_src_layout(self) -> bool: - """Try to find all packages or modules under the ``src`` directory - (or anything pointed by ``package_dir[""]``). - - The "src-layout" is relatively safe for automatic discovery. - We assume that everything within is meant to be included in the - distribution. - - If ``package_dir[""]`` is not given, but the ``src`` directory exists, - this function will set ``package_dir[""] = "src"``. 
- """ - package_dir = self._package_dir - src_dir = os.path.join(self._root_dir, package_dir.get("", "src")) - if not os.path.isdir(src_dir): - return False - - log.debug(f"`src-layout` detected -- analysing {src_dir}") - package_dir.setdefault("", os.path.basename(src_dir)) - self.dist.package_dir = package_dir # persist eventual modifications - self.dist.packages = PEP420PackageFinder.find(src_dir) - self.dist.py_modules = ModuleFinder.find(src_dir) - log.debug(f"discovered packages -- {self.dist.packages}") - log.debug(f"discovered py_modules -- {self.dist.py_modules}") - return True - - def _analyse_flat_layout(self) -> bool: - """Try to find all packages and modules under the project root. - - Since the ``flat-layout`` is more dangerous in terms of accidentally including - extra files/directories, this function is more conservative and will raise an - error if multiple packages or modules are found. - - This assumes that multi-package dists are uncommon and refuse to support that - use case in order to be able to prevent unintended errors. - """ - log.debug(f"`flat-layout` detected -- analysing {self._root_dir}") - return self._analyse_flat_packages() or self._analyse_flat_modules() - - def _analyse_flat_packages(self) -> bool: - self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir) - top_level = remove_nested_packages(remove_stubs(self.dist.packages)) - log.debug(f"discovered packages -- {self.dist.packages}") - self._ensure_no_accidental_inclusion(top_level, "packages") - return bool(top_level) - - def _analyse_flat_modules(self) -> bool: - self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir) - log.debug(f"discovered py_modules -- {self.dist.py_modules}") - self._ensure_no_accidental_inclusion(self.dist.py_modules, "modules") - return bool(self.dist.py_modules) - - def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str): - if len(detected) > 1: - from inspect import cleandoc - - from setuptools.errors import PackageDiscoveryError - - msg = f"""Multiple top-level {kind} discovered in a flat-layout: {detected}. - - To avoid accidental inclusion of unwanted files or directories, - setuptools will not proceed with this build. - - If you are trying to create a single distribution with multiple {kind} - on purpose, you should not rely on automatic discovery. - Instead, consider the following options: - - 1. set up custom discovery (`find` directive with `include` or `exclude`) - 2. use a `src-layout` - 3. explicitly set `py_modules` or `packages` with a list of names - - To find more information, look for "package discovery" on setuptools docs. - """ - raise PackageDiscoveryError(cleandoc(msg)) - - def analyse_name(self): - """The packages/modules are the essential contribution of the author. - Therefore the name of the distribution can be derived from them. 
- """ - if self.dist.metadata.name or self.dist.name: - # get_name() is not reliable (can return "UNKNOWN") - return None - - log.debug("No `name` configuration, performing automatic discovery") - - name = ( - self._find_name_single_package_or_module() - or self._find_name_from_packages() - ) - if name: - self.dist.metadata.name = name - - def _find_name_single_package_or_module(self) -> Optional[str]: - """Exactly one module or package""" - for field in ('packages', 'py_modules'): - items = getattr(self.dist, field, None) or [] - if items and len(items) == 1: - log.debug(f"Single module/package detected, name: {items[0]}") - return items[0] - - return None - - def _find_name_from_packages(self) -> Optional[str]: - """Try to find the root package that is not a PEP 420 namespace""" - if not self.dist.packages: - return None - - packages = remove_stubs(sorted(self.dist.packages, key=len)) - package_dir = self.dist.package_dir or {} - - parent_pkg = find_parent_package(packages, package_dir, self._root_dir) - if parent_pkg: - log.debug(f"Common parent package detected, name: {parent_pkg}") - return parent_pkg - - log.warn("No parent package detected, impossible to derive `name`") - return None - - -def remove_nested_packages(packages: List[str]) -> List[str]: - """Remove nested packages from a list of packages. - - >>> remove_nested_packages(["a", "a.b1", "a.b2", "a.b1.c1"]) - ['a'] - >>> remove_nested_packages(["a", "b", "c.d", "c.d.e.f", "g.h", "a.a1"]) - ['a', 'b', 'c.d', 'g.h'] - """ - pkgs = sorted(packages, key=len) - top_level = pkgs[:] - size = len(pkgs) - for i, name in enumerate(reversed(pkgs)): - if any(name.startswith(f"{other}.") for other in top_level): - top_level.pop(size - i - 1) - - return top_level - - -def remove_stubs(packages: List[str]) -> List[str]: - """Remove type stubs (:pep:`561`) from a list of packages. - - >>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"]) - ['a', 'a.b', 'b'] - """ - return [pkg for pkg in packages if not pkg.split(".")[0].endswith("-stubs")] - - -def find_parent_package( - packages: List[str], package_dir: Mapping[str, str], root_dir: _Path -) -> Optional[str]: - """Find the parent package that is not a namespace.""" - packages = sorted(packages, key=len) - common_ancestors = [] - for i, name in enumerate(packages): - if not all(n.startswith(f"{name}.") for n in packages[i+1:]): - # Since packages are sorted by length, this condition is able - # to find a list of all common ancestors. - # When there is divergence (e.g. multiple root packages) - # the list will be empty - break - common_ancestors.append(name) - - for name in common_ancestors: - pkg_path = find_package_path(name, package_dir, root_dir) - init = os.path.join(pkg_path, "__init__.py") - if os.path.isfile(init): - return name - - return None - - -def find_package_path( - name: str, package_dir: Mapping[str, str], root_dir: _Path -) -> str: - """Given a package name, return the path where it should be found on - disk, considering the ``package_dir`` option. 
- - >>> path = find_package_path("my.pkg", {"": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './root/is/nested/my/pkg' - - >>> path = find_package_path("my.pkg", {"my": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './root/is/nested/pkg' - - >>> path = find_package_path("my.pkg", {"my.pkg": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './root/is/nested' - - >>> path = find_package_path("other.pkg", {"my.pkg": "root/is/nested"}, ".") - >>> path.replace(os.sep, "/") - './other/pkg' - """ - parts = name.split(".") - for i in range(len(parts), 0, -1): - # Look backwards, the most specific package_dir first - partial_name = ".".join(parts[:i]) - if partial_name in package_dir: - parent = package_dir[partial_name] - return os.path.join(root_dir, parent, *parts[i:]) - - parent = package_dir.get("") or "" - return os.path.join(root_dir, *parent.split("/"), *parts) - - -def construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]: - parent_pkgs = remove_nested_packages(packages) - prefix = Path(package_path).parts - return {pkg: "/".join([*prefix, *pkg.split(".")]) for pkg in parent_pkgs} diff --git a/spaces/Azurro/APT-1B-Base/README.md b/spaces/Azurro/APT-1B-Base/README.md deleted file mode 100644 index 51c40e7115619a9696b587322663e0b11cc38d59..0000000000000000000000000000000000000000 --- a/spaces/Azurro/APT-1B-Base/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: APT-1B-Base -emoji: 💻 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: cc-by-nc-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/BIOML-SVM/SVM/proteinbind_new.py b/spaces/BIOML-SVM/SVM/proteinbind_new.py deleted file mode 100644 index 37a62abb09e7089e62631f7fd1320048e4f32842..0000000000000000000000000000000000000000 --- a/spaces/BIOML-SVM/SVM/proteinbind_new.py +++ /dev/null @@ -1,283 +0,0 @@ -from types import SimpleNamespace - -import pandas as pd -import torch -import torch.nn as nn -from torch.utils.data import Dataset - - -ModalityType = SimpleNamespace( - AA="aa", - DNA="dna", - PDB="pdb", - GO="go", - MSA="msa", - TEXT="text", -) - - -class Normalize(nn.Module): - def __init__(self, dim: int) -> None: - super().__init__() - self.dim = dim - - def forward(self, x): - return torch.nn.functional.normalize(x, dim=self.dim, p=2) - - -class EmbeddingDataset(Dataset): - """ - The main class for turning any modality to a torch Dataset that can be passed to - a torch dataloader. Any modality that doesn't fit into the __getitem__ - method can subclass this and modify the __getitem__ method. - """ - def __init__(self, sequence_file_path, embeddings_file_path, modality): - self.sequence = pd.read_csv(sequence_file_path) - self.embedding = torch.load(embeddings_file_path) - self.modality = modality - - def __len__(self): - return len(self.sequence) - - def __getitem__(self, idx): - sequence = self.sequence.iloc[idx, 0] - embedding = self.embedding[idx] - return {"aa": sequence, self.modality: embedding} - - -class DualEmbeddingDataset(Dataset): - """ - The main class for turning any modality to a torch Dataset that can be passed to - a torch dataloader. Any modality that doesn't fit into the __getitem__ - method can subclass this and modify the __getitem__ method. 
- """ - def __init__(self, sequence_embeddings_file_path, embeddings_file_path, modality): - self.sequence_embedding = torch.load(sequence_embeddings_file_path) - self.embedding = torch.load(embeddings_file_path) - self.modality = modality - - def __len__(self): - return len(self.sequence_embedding) - - def __getitem__(self, idx): - sequence_embedding = self.sequence_embedding[idx] - embedding = self.embedding[idx] - return {"aa": sequence_embedding, self.modality: embedding} - - -class ProteinBindModel(nn.Module): - - def __init__( - self, - aa_embed_dim, - dna_embed_dim, - pdb_embed_dim, - go_embed_dim, - msa_embed_dim, - text_embed_dim, - in_embed_dim, - out_embed_dim - ): - super().__init__() - self.modality_trunks = self._create_modality_trunk( - aa_embed_dim, - dna_embed_dim, - pdb_embed_dim, - go_embed_dim, - msa_embed_dim, - text_embed_dim, - out_embed_dim - ) - self.modality_heads = self._create_modality_head( - in_embed_dim, - out_embed_dim, - ) - self.modality_postprocessors = self._create_modality_postprocessors( - out_embed_dim - ) - - def _create_modality_trunk( - self, - aa_embed_dim, - dna_embed_dim, - pdb_embed_dim, - go_embed_dim, - msa_embed_dim, - text_embed_dim, - in_embed_dim - ): - """ - The current layers are just a proof of concept - and are subject to the opinion of others. - :param aa_embed_dim: - :param dna_embed_dim: - :param pdb_embed_dim: - :param go_embed_dim: - :param msa_embed_dim: - :param text_embed_dim: - :param in_embed_dim: - :return: - """ - modality_trunks = {} - - modality_trunks[ModalityType.AA] = nn.Sequential( - nn.Linear(aa_embed_dim, 512), - nn.ReLU(), - nn.Linear(512, 512), - nn.ReLU(), - nn.Linear(512, in_embed_dim), - ) - - modality_trunks[ModalityType.DNA] = nn.Sequential( - nn.Linear(dna_embed_dim, 512), - nn.ReLU(), - nn.Linear(512, 512), - nn.ReLU(), - nn.Linear(512, in_embed_dim), - ) - - modality_trunks[ModalityType.PDB] = nn.Sequential( - nn.Linear(pdb_embed_dim, 512), - nn.ReLU(), - nn.Linear(512, 512), - nn.ReLU(), - nn.Linear(512, in_embed_dim), - ) - - modality_trunks[ModalityType.GO] = nn.Sequential( - nn.Linear(go_embed_dim, 512), - nn.ReLU(), - nn.Linear(512, 512), - nn.ReLU(), - nn.Linear(512, in_embed_dim), - ) - - modality_trunks[ModalityType.MSA] = nn.Sequential( - nn.Linear(msa_embed_dim, 512), - nn.ReLU(), - nn.Linear(512, 512), - nn.ReLU(), - nn.Linear(512, in_embed_dim), - ) - - modality_trunks[ModalityType.TEXT] = nn.Sequential( - nn.Linear(text_embed_dim, 512), - nn.ReLU(), - nn.Linear(512, 512), - nn.ReLU(), - nn.Linear(512, in_embed_dim), - ) - - return nn.ModuleDict(modality_trunks) - - def _create_modality_head( - self, - in_embed_dim, - out_embed_dim - ): - modality_heads = {} - - modality_heads[ModalityType.AA] = nn.Sequential( - nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6), - nn.Dropout(p=0.5), - nn.Linear(in_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.DNA] = nn.Sequential( - nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6), - nn.Dropout(p=0.5), - nn.Linear(in_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.PDB] = nn.Sequential( - nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6), - nn.Dropout(p=0.5), - nn.Linear(in_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.GO] = nn.Sequential( - nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6), - nn.Dropout(p=0.5), - nn.Linear(in_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.MSA] = nn.Sequential( - 
nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6), - nn.Dropout(p=0.5), - nn.Linear(in_embed_dim, out_embed_dim, bias=False), - ) - - modality_heads[ModalityType.TEXT] = nn.Sequential( - nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6), - nn.Dropout(p=0.5), - nn.Linear(in_embed_dim, out_embed_dim, bias=False), - ) - return nn.ModuleDict(modality_heads) - - def _create_modality_postprocessors(self, out_embed_dim): - modality_postprocessors = {} - modality_postprocessors[ModalityType.AA] = Normalize(dim=-1) - modality_postprocessors[ModalityType.DNA] = Normalize(dim=-1) - modality_postprocessors[ModalityType.PDB] = Normalize(dim=-1) - modality_postprocessors[ModalityType.TEXT] = Normalize(dim=-1) - modality_postprocessors[ModalityType.GO] = Normalize(dim=-1) - modality_postprocessors[ModalityType.MSA] = Normalize(dim=-1) - - return nn.ModuleDict(modality_postprocessors) - - def forward(self, inputs): - """ - input = {k_1: [v],k_n: [v]} - for key in input - get trunk for key - forward pass of value in trunk - get projection head of key - forward pass of value in projection head - append output in output dict - return { k_1, [o], k_n: [o]} - """ - - outputs = {} - - for modality_key, modality_value in inputs.items(): - - modality_value = self.modality_trunks[modality_key]( - modality_value - ) - - modality_value = self.modality_heads[modality_key]( - modality_value - ) - - modality_value = self.modality_postprocessors[modality_key]( - modality_value - ) - outputs[modality_key] = modality_value - - return outputs - - -def create_proteinbind(pretrained=False): - """ - The embedding dimensions here are dummy - :param pretrained: - :return: - """ - model = ProteinBindModel( - aa_embed_dim=480, - dna_embed_dim=1280, - pdb_embed_dim=128, - go_embed_dim=600, - msa_embed_dim=768, - text_embed_dim=768, - in_embed_dim=1024, - out_embed_dim=1024 - ) - - if pretrained: - # get path from config - PATH = 'best_model.pth' - - model.load_state_dict(torch.load(PATH)) - - return model diff --git a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py b/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py deleted file mode 100644 index 4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md b/spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md deleted file mode 100644 index a95f4f8ed264b378f72fa82892db7d6386b64e48..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md +++ /dev/null @@ -1,121 +0,0 @@ -
Project Drift 2.0 Mod APK 43: Everything You Need to Know

If you are a fan of racing games, especially drifting games, you may have heard of Project Drift 2.0, a realistic and challenging drift-simulation game for Android devices. In this article, we will tell you everything you need to know about Project Drift 2.0 Mod APK 43, a modified version of the game that gives you unlimited money and access to all the cars and tracks in the game. We will also share some tips and tricks on how to play the game and master the art of drifting.

alchemy classic 2 mod apk

Download Zip ✵✵✵ https://bltlly.com/2v6Jf8

What is Project Drift 2.0?

Project Drift 2.0 is the sequel to the popular game Project Drift, developed by Bycodec Games, a Turkish indie game studio. The game is designed to provide a realistic and immersive drifting experience, with impressive graphics, physics-based car handling, and a variety of game modes and challenges.

Features of Project Drift 2.0

Some of the features of Project Drift 2.0 are:

How to Download and Install Project Drift 2.0 Mod APK 43

To download and install Project Drift 2.0 Mod APK 43, follow these steps:

1. Go to [HappyMod]( 1 ), a website that provides safe and reliable mod APK downloads for various games and apps.
2. Search for "Project Drift 2.0 Mod APK" in the search bar.
3. Select the version that says "Project Drift 2.0 Mod Apk [unlimited money]" and make sure it matches version number "43".
4. Click the "Download" button and wait for the file to download to your device.
5. Once the file has downloaded, locate it in your file manager and tap it to install it.
6. If you see a pop-up that says "Install blocked", go to your device settings and enable "Unknown sources" in the security options.
7. After installing the mod APK, launch the game and enjoy unlimited money and access to all the cars and tracks in the game.

Why Use Project Drift 2.0 Mod APK 43?

You may be wondering why you should use Project Drift 2.0 Mod APK 43 instead of the original version of the game. There are benefits and risks to using the mod APK that you should be aware of before deciding to use it.

Benefits of Project Drift 2.0 Mod APK 43

Some of the benefits of using Project Drift 2.0 Mod APK 43 are:

Risks of Project Drift 2.0 Mod APK 43

Some of the risks of using Project Drift 2.0 Mod APK 43 are:

Tips and Tricks for Playing Project Drift 2.0

Whether you use Project Drift 2.0 Mod APK 43 or the original version of the game, you may want to know some tips and tricks on how to play the game and master the art of drifting. Here are a few:

How to Master Drifting in Project Drift 2.0

Drifting is the main skill you need to master in Project Drift 2.0, as it is how you earn points and reputation in the game. Drifting means sliding your car sideways around a corner or curve while keeping control and speed. To drift in Project Drift 2.0, follow these steps:

1. Choose a car with good handling and power, as these are essential for drifting.
2. Choose a track with sharp corners and turns, as these are ideal for drifting.
3. As you approach a corner or curve, tap the brake pedal to reduce your speed and start the drift.
4. As you enter the drift, steer your car in the opposite direction of the turn while pressing the gas pedal to keep your momentum and balance.
5. As you exit the drift, steer your car back in line with the road while releasing the gas pedal to regain traction and stability.

How to Unlock New Cars and Tracks in Project Drift 2.0

How to Customize Your Car in Project Drift 2.0

One of the fun aspects of Project Drift 2.0 is that you can customize your car to look and perform better. You can change your car's color, paint, decals, wheels, tires, spoilers, exhausts, and more. You can also upgrade its engine, transmission, suspension, brakes, and more. To customize your car in Project Drift 2.0, follow these steps:

1. Select the car you want to customize from your garage.
2. Tap the "Customize" button at the bottom of the screen.
3. Choose the category you want to customize, such as appearance or performance.
4. Select the item you want to change or upgrade, such as color or engine.
5. Choose the option you want to apply, such as red or turbo.
6. Tap the "Apply" button to confirm your changes.
7. Tap the "Back" button to return to your garage.

Conclusion

Call to Action for Readers

If you are ready to start drifting in Project Drift 2.0, download the game from [Google Play] or [HappyMod] now and enjoy the thrill of sliding your car around corners and curves. Don't forget to share your feedback and opinions about the game with us in the comments section below. Happy drifting!

FAQs

Here are some frequently asked questions about Project Drift 2.0:

1. What is the difference between Project Drift 2.0 Mod APK 43 and Project Drift 2.0 Hack APK?

Project Drift 2.0 Mod APK 43 is a modified version of the game that gives you unlimited money and access to all the cars and tracks in the game. Project Drift 2.0 Hack APK is a hacked version of the game that gives you unlimited money, access to all cars and tracks, and other cheats such as invincibility, speed boosts, or auto-drift. Both versions are unofficial and may carry risks such as compatibility problems, data loss, bans, or malware.

2. How do I update Project Drift 2.0 Mod APK 43?

If you use Project Drift 2.0 Mod APK 43, you may not be able to update the game from Google Play, since it may detect that you are using a modified version of the game and block the update. To update Project Drift 2.0 Mod APK 43, you will have to download and install the latest version of the mod APK from [HappyMod] or another reliable source. However, you may lose your progress or data in the game when you update the mod APK, so make sure to back up your data before updating.

3. How do I play Project Drift 2.0 offline?

Project Drift 2.0 is an online game that requires an Internet connection to play. However, you can play some parts of the game offline, such as free-ride mode and time-attack mode. To play Project Drift 2.0 offline, follow these steps:

1. Launch the game while you have an Internet connection.
2. Select the car and track you want to play.
3. Wait for the game to load the car and the track.
4. Turn off your Internet connection or switch your device to airplane mode.
5. Enjoy playing Project Drift 2.0 offline.

4. How do I play Project Drift 2.0 with friends?

Project Drift 2.0 has a multiplayer mode that lets you play with friends or other players online in real-time drift battles. To play Project Drift 2.0 with your friends, follow these steps:

1. Launch the game and make sure you have an Internet connection.
2. Select the multiplayer mode from the main menu.
3. Select the car and track you want to play.
4. Wait for the game to find an opponent, or invite a friend to join your match.
5. Start drifting and try to beat your opponent or friend by scoring more points or drifting longer.

5. How do I get more money in Project Drift 2.0?

If you use Project Drift 2.0 Mod APK 43, you will have unlimited money in the game, which you can use to buy and upgrade any car you want. However, if you use the original version of the game, you will have to earn money by completing missions and challenges, watching ads, or making in-app purchases. Here are some tips on how to get more money in Project Drift 2.0:

64aa2da5cf
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md b/spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md deleted file mode 100644 index eecac01feed3f488be147422f92b7fefe4d48172..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md +++ /dev/null @@ -1,71 +0,0 @@ -
Carrom Pool Disc Game Mod Apk: How to Download and Enjoy Unlimited Coins and Gems

If you are a fan of carrom board games, you will love Carrom Pool Disc Game. This is an online multiplayer game that lets you play carrom with your friends or with other players from around the world. You can also customize your boards, pieces, and strikers with various themes and designs. But what if you want to enjoy more features and benefits without spending real money? That is where Carrom Pool Mod Apk comes in. In this article, we will tell you what Carrom Pool Disc Game is, what Carrom Pool Mod Apk is, how to download and install it, and some tips and tricks to improve your game.

carrom pool disc game mod apk unlimited coins and gems

Download File https://bltlly.com/2v6Kbw

What is Carrom Pool Disc Game?

Carrom Pool Disc Game is a popular mobile game developed by Miniclip. It is based on the traditional carrom board game, a tabletop game that originated in India. The game involves using a striker to knock discs into the pockets at the four corners of the board. The discs are either black or white, and the player who pockets all of their discs first wins the game.

Features of Carrom Pool Disc Game

Some of the features of Carrom Pool Disc Game are:

How to Play Carrom Pool Disc Game

The gameplay of Carrom Pool Disc Game is simple and intuitive. You just drag your finger on the screen to aim your striker and release it to hit the discs. You can also adjust the power of your shot by moving your finger closer to or farther from the striker. The goal is to pocket all of your discs before your opponent does. You can also use boosters, such as extra time, an extra turn, or undo, to help you win the game.

What is Carrom Pool Mod Apk?

Carrom Pool Mod Apk is a modified version of Carrom Pool Disc Game that gives you unlimited coins and gems. Coins and gems are the in-game currencies you need to buy new boards, pieces, strikers, chests, and boosters. Normally, you have to earn them by playing games, completing missions, or watching ads. But with Carrom Pool Mod Apk, you can get them for free without any hassle.

Benefits of Carrom Pool Mod Apk

Some of the benefits of Carrom Pool Mod Apk are:

How to Download and Install Carrom Pool Mod Apk

Downloading and installing Carrom Pool Mod Apk is quick and easy. Just follow these steps:

1. Click this link to download the Carrom Pool Mod Apk file: [Carrom Pool Mod Apk Download].
2. Locate the downloaded file in your file manager and tap it to start the installation.
3. Wait for the installation to finish and then open the app.
4. Enjoy playing Carrom Pool Disc Game with unlimited coins and gems.

Tips and Tricks for Carrom Pool Disc Game

If you want to improve your skills and win more games in Carrom Pool Disc Game, here are some tips and tricks you can use:

Practice Mode

Before playing online against other players, you can practice your shots and strategies in practice mode. This mode lets you play against the computer or with another player on the same device. You can also choose the difficulty level and the game mode you want to practice. Practice mode is a great way to learn the basics and master the game.

Aim and Power

The most important aspects of Carrom Pool Disc Game are aim and power. You need to aim your striker accurately and hit the discs with the right amount of power. To aim your striker, use the guide that shows the direction and angle of your shot. You can also zoom in or out to see the board better. To adjust the power of your shot, move your finger closer to or farther from the striker. You need to balance power and accuracy depending on the situation. For example, if you want to pocket a disc that is close to a pocket, a low-power shot will do; but if you want to pocket a disc that is far from a pocket, you need a high-power shot.

Use Boosters and Chests

Conclusion

Carrom Pool Disc Game is a fun and addictive game that you can play with your friends or with other players online. It is based on the classic carrom board game, but with more features and options. You can also download Carrom Pool Mod Apk to get unlimited coins and gems and unlock all the premium items and arenas. Carrom Pool Mod Apk is easy to download and install, and it will make your game more exciting and rewarding. If you want to become a pro at Carrom Pool Disc Game, you can also use some of the tips and tricks we have shared in this article. So, what are you waiting for? Download Carrom Pool Disc Game or Carrom Pool Mod Apk now and start playing!

FAQs

Here are some frequently asked questions about Carrom Pool Disc Game and Carrom Pool Mod Apk:

64aa2da5cf
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Chess King Mod Apk.md b/spaces/Benson/text-generation/Examples/Chess King Mod Apk.md deleted file mode 100644 index 702f3bada83c1d637eaaea14f9485a5cde62ff32..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Chess King Mod Apk.md +++ /dev/null @@ -1,91 +0,0 @@ -
Chess King Mod Apk: A Board Game with Unlimited Features

If you are a chess fan, you may have heard of Chess King, a popular board game app that lets you play chess against the computer or against other players online. But did you know there is a modded version of Chess King that gives you access to unlimited features and options? In this article, we will tell you everything you need to know about Chess King Mod Apk: how to download and install it, why you should play it, and how to play it. So, let's get started!

chess king mod apk

Download File https://bltlly.com/2v6Ktx

What is Chess King Mod Apk?

A brief introduction to the game and its features

Chess King is a board game app that lets you play chess on your mobile device. You can choose from different modes, such as training, puzzles, tournaments, online matches, and more. You can also customize your board, pieces, and background to your liking. Chess King has a user-friendly interface and a powerful engine that provides a realistic and challenging chess experience.

Chess King Mod Apk is a modified version of Chess King that unlocks all the features and options that are restricted or paid in the original version. With Chess King Mod Apk, you can enjoy:

How to download and install the mod apk on your device

Downloading and installing Chess King Mod Apk is quick and simple. Just follow these steps:

1. Click this link to download the mod apk file to your device.
2. Tap the file and allow installation from unknown sources if prompted.
3. Wait for the installation to finish and then launch the app.
4. Enjoy playing Chess King Mod Apk with unlimited features!

Why should you play Chess King Mod Apk?

The benefits of playing chess for your brain and skills

Chess is not only a fun and entertaining game, but also a great way to improve your brain and skills. Playing chess can help you:
Conclusion

Rocket League Sideswipe is a mobile version of the popular car soccer game Rocket League. It has been redesigned for mobile devices, with simplified controls, shorter matches, and a 2D perspective. However, it still retains the core gameplay and features of Rocket League, such as car soccer, customization, online multiplayer, and seasons. You can download and install the game from the official app stores, or from a third-party website that offers the modded APK file with everything unlocked. However, the latter method is not recommended, as it may have some drawbacks and risks. You can play and enjoy the game by learning the controls, practicing your moves, customizing your car, unlocking items with the Rocket Pass, and playing online with friends or other players around the world. Rocket League Sideswipe is a fun and addictive game that will keep you entertained for hours.

FAQs

Here are some frequently asked questions about the Rocket League Sideswipe APK with everything unlocked:

1. What is the difference between Rocket League and Rocket League Sideswipe?

Rocket League is the original game that is available for PC and consoles, while Rocket League Sideswipe is the mobile version that is available for Android and iOS devices. Rocket League Sideswipe has been adapted for mobile devices, with simplified controls, shorter matches, and a 2D perspective. However, it still has the same gameplay and features as Rocket League.

2. Is Rocket League Sideswipe free to play?

Yes, Rocket League Sideswipe is free to play and download from the official app stores. However, it has some optional in-app purchases, such as credits that can be used to buy premium items or tiers in the Rocket Pass.

3. How can I play Rocket League Sideswipe with my friends?

You can play Rocket League Sideswipe with your friends in private matches or online multiplayer mode. To play in private matches, you need to create or join a room code that you can share with your friends. To play in online multiplayer mode, you need to invite your friends to your party or join their party.

4. How can I update Rocket League Sideswipe to the latest version?

If you downloaded the game from the official app stores, you can update it automatically or manually from the app store. If you downloaded the modded APK file from a third-party website, you may not be able to update it to the latest version. You may need to download and install a new modded APK file from the same or another website.

5. How can I contact Psyonix Studios for support or feedback?

You can contact Psyonix Studios for support or feedback by visiting their official website or social media accounts. You can also use the in-game feedback option or email them at support@psyonix.com.

401be4b1e0
\ No newline at end of file diff --git a/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/base_callbacks.py b/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/base_callbacks.py deleted file mode 100644 index ef62dd591f1516aa41e2ba347cc3aaa558854f8d..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/base_callbacks.py +++ /dev/null @@ -1,44 +0,0 @@ -import logging -import os -from typing import NoReturn - -import pytorch_lightning as pl -import torch -import torch.nn as nn -from pytorch_lightning.utilities import rank_zero_only - - -class SaveCheckpointsCallback(pl.Callback): - def __init__( - self, - model: nn.Module, - checkpoints_dir: str, - save_step_frequency: int, - ): - r"""Callback to save checkpoints every #save_step_frequency steps. - - Args: - model: nn.Module - checkpoints_dir: str, directory to save checkpoints - save_step_frequency: int - """ - self.model = model - self.checkpoints_dir = checkpoints_dir - self.save_step_frequency = save_step_frequency - os.makedirs(self.checkpoints_dir, exist_ok=True) - - @rank_zero_only - def on_batch_end(self, trainer: pl.Trainer, _) -> NoReturn: - r"""Save checkpoint.""" - global_step = trainer.global_step - - if global_step % self.save_step_frequency == 0: - - checkpoint_path = os.path.join( - self.checkpoints_dir, "step={}.pth".format(global_step) - ) - - checkpoint = {'step': global_step, 'model': self.model.state_dict()} - - torch.save(checkpoint, checkpoint_path) - logging.info("Save checkpoint to {}".format(checkpoint_path)) diff --git a/spaces/fffiloni/Music_Source_Separation/scripts/2_create_indexes/vctk-musdb18/create_indexes.sh b/spaces/fffiloni/Music_Source_Separation/scripts/2_create_indexes/vctk-musdb18/create_indexes.sh deleted file mode 100644 index e2a85230b2745cedb2c98a34ed303082bb1ec48a..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Music_Source_Separation/scripts/2_create_indexes/vctk-musdb18/create_indexes.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -WORKSPACE=${1:-"./workspaces/bytesep"} # Default workspace directory - -echo "WORKSPACE=${WORKSPACE}" - -# Users can modify the following config file. -INDEXES_CONFIG_YAML="scripts/2_create_indexes/vctk-musdb18/configs/speech-accompaniment,sr=44100,chn=2.yaml" - -# Create indexes for training. -python3 bytesep/dataset_creation/create_indexes/create_indexes.py \ - --workspace=$WORKSPACE \ - --config_yaml=$INDEXES_CONFIG_YAML diff --git a/spaces/fffiloni/SplitTrack2MusicGen/tests/data/test_audio_utils.py b/spaces/fffiloni/SplitTrack2MusicGen/tests/data/test_audio_utils.py deleted file mode 100644 index 0480671bb17281d61ce02bce6373a5ccec89fece..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/SplitTrack2MusicGen/tests/data/test_audio_utils.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import julius -import torch -import pytest - -from audiocraft.data.audio_utils import ( - _clip_wav, - convert_audio_channels, - convert_audio, - normalize_audio -) -from ..common_utils import get_batch_white_noise - - -class TestConvertAudioChannels: - - def test_convert_audio_channels_downmix(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=2) - assert list(mixed.shape) == [b, 2, t] - - def test_convert_audio_channels_nochange(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=c) - assert list(mixed.shape) == list(audio.shape) - - def test_convert_audio_channels_upmix(self): - b, c, t = 2, 1, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=3) - assert list(mixed.shape) == [b, 3, t] - - def test_convert_audio_channels_upmix_error(self): - b, c, t = 2, 2, 100 - audio = get_batch_white_noise(b, c, t) - with pytest.raises(ValueError): - convert_audio_channels(audio, channels=3) - - -class TestConvertAudio: - - def test_convert_audio_channels_downmix(self): - b, c, dur = 2, 3, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2) - assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]] - - def test_convert_audio_channels_upmix(self): - b, c, dur = 2, 1, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3) - assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]] - - def test_convert_audio_upsample(self): - b, c, dur = 2, 1, 4. - sr = 2 - new_sr = 3 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - def test_convert_audio_resample(self): - b, c, dur = 2, 1, 4. - sr = 3 - new_sr = 2 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - -class TestNormalizeAudio: - - def test_clip_wav(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - _clip_wav(audio) - assert audio.abs().max() <= 1 - - def test_normalize_audio_clip(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='clip') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_rms(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='rms') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_peak(self): - b, c, dur = 2, 1, 4. 
- sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='peak') - assert norm_audio.abs().max() <= 1 diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/bigint.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/bigint.js deleted file mode 100644 index 4ecc31df8ab3fd311570d866b418514d159abfde..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/bigint.js +++ /dev/null @@ -1,58 +0,0 @@ -'use strict'; - -var inspect = require('../'); -var test = require('tape'); -var hasToStringTag = require('has-tostringtag/shams')(); - -test('bigint', { skip: typeof BigInt === 'undefined' }, function (t) { - t.test('primitives', function (st) { - st.plan(3); - - st.equal(inspect(BigInt(-256)), '-256n'); - st.equal(inspect(BigInt(0)), '0n'); - st.equal(inspect(BigInt(256)), '256n'); - }); - - t.test('objects', function (st) { - st.plan(3); - - st.equal(inspect(Object(BigInt(-256))), 'Object(-256n)'); - st.equal(inspect(Object(BigInt(0))), 'Object(0n)'); - st.equal(inspect(Object(BigInt(256))), 'Object(256n)'); - }); - - t.test('syntactic primitives', function (st) { - st.plan(3); - - /* eslint-disable no-new-func */ - st.equal(inspect(Function('return -256n')()), '-256n'); - st.equal(inspect(Function('return 0n')()), '0n'); - st.equal(inspect(Function('return 256n')()), '256n'); - }); - - t.test('toStringTag', { skip: !hasToStringTag }, function (st) { - st.plan(1); - - var faker = {}; - faker[Symbol.toStringTag] = 'BigInt'; - st.equal( - inspect(faker), - '{ [Symbol(Symbol.toStringTag)]: \'BigInt\' }', - 'object lying about being a BigInt inspects as an object' - ); - }); - - t.test('numericSeparator', function (st) { - st.equal(inspect(BigInt(0), { numericSeparator: false }), '0n', '0n, numericSeparator false'); - st.equal(inspect(BigInt(0), { numericSeparator: true }), '0n', '0n, numericSeparator true'); - - st.equal(inspect(BigInt(1234), { numericSeparator: false }), '1234n', '1234n, numericSeparator false'); - st.equal(inspect(BigInt(1234), { numericSeparator: true }), '1_234n', '1234n, numericSeparator true'); - st.equal(inspect(BigInt(-1234), { numericSeparator: false }), '-1234n', '1234n, numericSeparator false'); - st.equal(inspect(BigInt(-1234), { numericSeparator: true }), '-1_234n', '1234n, numericSeparator true'); - - st.end(); - }); - - t.end(); -}); diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/cjs/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/cjs/index.js deleted file mode 100644 index fc99bbfae2f7f645e0db479003ad7d1afab82849..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-parser/build/cjs/index.js +++ /dev/null @@ -1,303 +0,0 @@ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.Decoder = exports.Encoder = exports.PacketType = exports.protocol = void 0; -const component_emitter_1 = require("@socket.io/component-emitter"); -const binary_js_1 = require("./binary.js"); -const is_binary_js_1 = require("./is-binary.js"); -const debug_1 = require("debug"); // debug() -const debug = (0, debug_1.default)("socket.io-parser"); // debug() -/** - * Protocol version. 
- * - * @public - */ -exports.protocol = 5; -var PacketType; -(function (PacketType) { - PacketType[PacketType["CONNECT"] = 0] = "CONNECT"; - PacketType[PacketType["DISCONNECT"] = 1] = "DISCONNECT"; - PacketType[PacketType["EVENT"] = 2] = "EVENT"; - PacketType[PacketType["ACK"] = 3] = "ACK"; - PacketType[PacketType["CONNECT_ERROR"] = 4] = "CONNECT_ERROR"; - PacketType[PacketType["BINARY_EVENT"] = 5] = "BINARY_EVENT"; - PacketType[PacketType["BINARY_ACK"] = 6] = "BINARY_ACK"; -})(PacketType = exports.PacketType || (exports.PacketType = {})); -/** - * A socket.io Encoder instance - */ -class Encoder { - /** - * Encoder constructor - * - * @param {function} replacer - custom replacer to pass down to JSON.parse - */ - constructor(replacer) { - this.replacer = replacer; - } - /** - * Encode a packet as a single string if non-binary, or as a - * buffer sequence, depending on packet type. - * - * @param {Object} obj - packet object - */ - encode(obj) { - debug("encoding packet %j", obj); - if (obj.type === PacketType.EVENT || obj.type === PacketType.ACK) { - if ((0, is_binary_js_1.hasBinary)(obj)) { - return this.encodeAsBinary({ - type: obj.type === PacketType.EVENT - ? PacketType.BINARY_EVENT - : PacketType.BINARY_ACK, - nsp: obj.nsp, - data: obj.data, - id: obj.id, - }); - } - } - return [this.encodeAsString(obj)]; - } - /** - * Encode packet as string. - */ - encodeAsString(obj) { - // first is type - let str = "" + obj.type; - // attachments if we have them - if (obj.type === PacketType.BINARY_EVENT || - obj.type === PacketType.BINARY_ACK) { - str += obj.attachments + "-"; - } - // if we have a namespace other than `/` - // we append it followed by a comma `,` - if (obj.nsp && "/" !== obj.nsp) { - str += obj.nsp + ","; - } - // immediately followed by the id - if (null != obj.id) { - str += obj.id; - } - // json data - if (null != obj.data) { - str += JSON.stringify(obj.data, this.replacer); - } - debug("encoded %j as %s", obj, str); - return str; - } - /** - * Encode packet as 'buffer sequence' by removing blobs, and - * deconstructing packet into object with placeholders and - * a list of buffers. - */ - encodeAsBinary(obj) { - const deconstruction = (0, binary_js_1.deconstructPacket)(obj); - const pack = this.encodeAsString(deconstruction.packet); - const buffers = deconstruction.buffers; - buffers.unshift(pack); // add packet info to beginning of data list - return buffers; // write all the buffers - } -} -exports.Encoder = Encoder; -/** - * A socket.io Decoder instance - * - * @return {Object} decoder - */ -class Decoder extends component_emitter_1.Emitter { - /** - * Decoder constructor - * - * @param {function} reviver - custom reviver to pass down to JSON.stringify - */ - constructor(reviver) { - super(); - this.reviver = reviver; - } - /** - * Decodes an encoded packet string into packet JSON. - * - * @param {String} obj - encoded packet - */ - add(obj) { - let packet; - if (typeof obj === "string") { - if (this.reconstructor) { - throw new Error("got plaintext data when reconstructing a packet"); - } - packet = this.decodeString(obj); - const isBinaryEvent = packet.type === PacketType.BINARY_EVENT; - if (isBinaryEvent || packet.type === PacketType.BINARY_ACK) { - packet.type = isBinaryEvent ? 
PacketType.EVENT : PacketType.ACK; - // binary packet's json - this.reconstructor = new BinaryReconstructor(packet); - // no attachments, labeled binary but no binary data to follow - if (packet.attachments === 0) { - super.emitReserved("decoded", packet); - } - } - else { - // non-binary full packet - super.emitReserved("decoded", packet); - } - } - else if ((0, is_binary_js_1.isBinary)(obj) || obj.base64) { - // raw binary data - if (!this.reconstructor) { - throw new Error("got binary data when not reconstructing a packet"); - } - else { - packet = this.reconstructor.takeBinaryData(obj); - if (packet) { - // received final buffer - this.reconstructor = null; - super.emitReserved("decoded", packet); - } - } - } - else { - throw new Error("Unknown type: " + obj); - } - } - /** - * Decode a packet String (JSON data) - * - * @param {String} str - * @return {Object} packet - */ - decodeString(str) { - let i = 0; - // look up type - const p = { - type: Number(str.charAt(0)), - }; - if (PacketType[p.type] === undefined) { - throw new Error("unknown packet type " + p.type); - } - // look up attachments if type binary - if (p.type === PacketType.BINARY_EVENT || - p.type === PacketType.BINARY_ACK) { - const start = i + 1; - while (str.charAt(++i) !== "-" && i != str.length) { } - const buf = str.substring(start, i); - if (buf != Number(buf) || str.charAt(i) !== "-") { - throw new Error("Illegal attachments"); - } - p.attachments = Number(buf); - } - // look up namespace (if any) - if ("/" === str.charAt(i + 1)) { - const start = i + 1; - while (++i) { - const c = str.charAt(i); - if ("," === c) - break; - if (i === str.length) - break; - } - p.nsp = str.substring(start, i); - } - else { - p.nsp = "/"; - } - // look up id - const next = str.charAt(i + 1); - if ("" !== next && Number(next) == next) { - const start = i + 1; - while (++i) { - const c = str.charAt(i); - if (null == c || Number(c) != c) { - --i; - break; - } - if (i === str.length) - break; - } - p.id = Number(str.substring(start, i + 1)); - } - // look up json data - if (str.charAt(++i)) { - const payload = this.tryParse(str.substr(i)); - if (Decoder.isPayloadValid(p.type, payload)) { - p.data = payload; - } - else { - throw new Error("invalid payload"); - } - } - debug("decoded %s as %j", str, p); - return p; - } - tryParse(str) { - try { - return JSON.parse(str, this.reviver); - } - catch (e) { - return false; - } - } - static isPayloadValid(type, payload) { - switch (type) { - case PacketType.CONNECT: - return typeof payload === "object"; - case PacketType.DISCONNECT: - return payload === undefined; - case PacketType.CONNECT_ERROR: - return typeof payload === "string" || typeof payload === "object"; - case PacketType.EVENT: - case PacketType.BINARY_EVENT: - return Array.isArray(payload) && payload.length > 0; - case PacketType.ACK: - case PacketType.BINARY_ACK: - return Array.isArray(payload); - } - } - /** - * Deallocates a parser's resources - */ - destroy() { - if (this.reconstructor) { - this.reconstructor.finishedReconstruction(); - this.reconstructor = null; - } - } -} -exports.Decoder = Decoder; -/** - * A manager of a binary event's 'buffer sequence'. Should - * be constructed whenever a packet of type BINARY_EVENT is - * decoded. 
- * - * @param {Object} packet - * @return {BinaryReconstructor} initialized reconstructor - */ -class BinaryReconstructor { - constructor(packet) { - this.packet = packet; - this.buffers = []; - this.reconPack = packet; - } - /** - * Method to be called when binary data received from connection - * after a BINARY_EVENT packet. - * - * @param {Buffer | ArrayBuffer} binData - the raw binary data received - * @return {null | Object} returns null if more binary data is expected or - * a reconstructed packet object if all buffers have been received. - */ - takeBinaryData(binData) { - this.buffers.push(binData); - if (this.buffers.length === this.reconPack.attachments) { - // done with buffer list - const packet = (0, binary_js_1.reconstructPacket)(this.reconPack, this.buffers); - this.finishedReconstruction(); - return packet; - } - return null; - } - /** - * Cleans up binary packet reconstruction variables. - */ - finishedReconstruction() { - this.reconPack = null; - this.buffers = []; - } -} diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_63.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_63.py deleted file mode 100644 index 8516dee445a1c65c255c1aa334507c28a84ed7d2..0000000000000000000000000000000000000000 --- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_63.py +++ /dev/null @@ -1,35 +0,0 @@ - -import re - -def is_spam(message: str) -> bool: - # Checking for spam URL patterns - spam_url_patterns = [ - r"(?i)https?:\/\/(?:me2\.kr|buly\.kr|opcn\-kakao.com|han.gl|abit\.ly)/\S*", - r"(?i)ⓢlⓩ102\.com", - r"(?i)orl\.kr\/\S*", - r"(?i)https?://openkakao.io/\S*" - ] - - for pattern in spam_url_patterns: - if re.search(pattern, message): - return True - - # Checking for other spam patterns - spam_patterns = [ - r"(?i)(vip|vvip)투자반", - r"(?i)차별화 된", - r"(?i)시작하루만에", - r"(?i)추천주 현황", - r"(?i)slot🎰zone", - r"(?i)지니틱스", - r"(?i)카카오톡제재", - r"(?i)[5일평균].*[8,930.000원]", - r"(?i)문의▼", - ] - - for pattern in spam_patterns: - if re.search(pattern, message): - return True - - # If none of the spam patterns are present - return False diff --git a/spaces/fiz123321/nah/greeting.md b/spaces/fiz123321/nah/greeting.md deleted file mode 100644 index 6c2c45107629147b25bbb2a17d9f297702132769..0000000000000000000000000000000000000000 --- a/spaces/fiz123321/nah/greeting.md +++ /dev/null @@ -1,3 +0,0 @@ -<3 -https://rentry.org/miniproxy - \ No newline at end of file diff --git a/spaces/flax-community/roberta-base-mr/apps/mlm.py b/spaces/flax-community/roberta-base-mr/apps/mlm.py deleted file mode 100644 index 7f5389e0f63c463c45d111cc52807436b14e7589..0000000000000000000000000000000000000000 --- a/spaces/flax-community/roberta-base-mr/apps/mlm.py +++ /dev/null @@ -1,53 +0,0 @@ -import json - -import streamlit as st -from transformers import AutoTokenizer, RobertaForMaskedLM, pipeline - -with open("config.json", encoding="utf8") as f: - cfg = json.loads(f.read()) - - -@st.cache(allow_output_mutation=True, show_spinner=False) -def load_model(input_text, model_name_or_path): - tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) - model = RobertaForMaskedLM.from_pretrained(model_name_or_path) - - nlp = pipeline("fill-mask", model=model, tokenizer=tokenizer) - result = nlp(input_text) - sentence, mask = result[0]["sequence"], result[0]["token_str"] - return sentence, mask, result - - -def app(): - st.title("RoBERTa Marathi - मराठी भाषा") - - st.markdown( - "This demo uses [RoBERTa for Marathi](https://huggingface.co/flax-community/roberta-base-mr) model " - "trained on 
[mC4](https://huggingface.co/datasets/mc4)." - ) - - st.markdown( - "❓Can't figure out where to get a sample text other than the predefined ones?❓\n\n" - "Use any custom sentence with masked word or copy any headline from this [link](https://maharashtratimes.com/entertainment/articlelist/19359255.cms), and mask a word.\n" - "> 📒 NOTE: Supports only single `` word" - ) - - masked_texts = [ - "मोठी बातमी! उद्या दुपारी वाजता जाहीर होणार दहावीचा निकाल", - "जॉनी लीवर यांनी नम्रता संभेरावला दिलं गिफ्ट, अभिनेत्रीने व्यक्त केल्या भावना" - # "अध्यक्ष पवार आणि उपमुख्यमंत्री अजित पवार यांची भेट घेतली.", - ] - - input_text = st.sidebar.selectbox("Select a Text", options=masked_texts) - masked_text = st.text_input("Please type a masked sentence to fill", input_text) - - fill_button = st.button("Fill the Mask!") - - if fill_button: - with st.spinner("Filling the Mask..."): - filled_sentence, mask, raw_json = load_model(masked_text, cfg["models"]["RoBERTa"]) - - st.markdown(f"**Filled sentence: **{filled_sentence}") - st.markdown(f"**Predicted masked token: **{mask}") - - st.write(raw_json) diff --git a/spaces/freshield/ChatGPT-gradio/OpenaiBot.py b/spaces/freshield/ChatGPT-gradio/OpenaiBot.py deleted file mode 100644 index 81734a5986aff1be9ea3490fd786262daf8de07d..0000000000000000000000000000000000000000 --- a/spaces/freshield/ChatGPT-gradio/OpenaiBot.py +++ /dev/null @@ -1,60 +0,0 @@ -# coding=utf-8 -""" -@Author: Freshield -@Contact: yangyufresh@163.com -@File: OpenaiBot.py -@Time: 2023-03-03 17:47 -@Last_update: 2023-03-03 17:47 -@Desc: None -@==============================================@ -@ _____ _ _ _ _ @ -@ | __|___ ___ ___| |_|_|___| |_| | @ -@ | __| _| -_|_ -| | | -_| | . | @ -@ |__| |_| |___|___|_|_|_|___|_|___| @ -@ Freshield @ -@==============================================@ -""" -import os -import openai - - -class OpenaiBot(object): - """调用openai的机器人""" - def __init__(self, temperature=0.5): - openai.api_key = os.environ.get('OPENAI_API_KEY') - self.model_engine = "gpt-3.5-turbo" - self.temperature = temperature - - def set_api_key(self, api_key): - """设定api key""" - openai.api_key = api_key - - def construct_message(self, role, new_msg, history_list, keep_history=3): - """ - 构造message,这里history_list是一个list,每个元素是一个tuple - """ - msg_list = [{"role": "system", "content": role}] - history_list = history_list[-keep_history:] - for user, assistant in history_list: - msg_list.append({"role": "user", "content": user}) - msg_list.append({"role": "assistant", "content": assistant}) - msg_list.append({"role": "user", "content": new_msg}) - - return msg_list - - def get_response(self, role, new_msg, history_list, keep_history=3): - """ - 通过openai获取回复 - """ - msg_list = self.construct_message(role, new_msg, history_list, keep_history) - response = openai.ChatCompletion.create( - model=self.model_engine, messages=msg_list, - temperature=self.temperature - ) - content = response['choices'][0]['message']['content'] - - return content - - -if __name__ == '__main__': - openai_bot = OpenaiBot() diff --git a/spaces/fuckyoudeki/AutoGPT/autogpt/memory/milvus.py b/spaces/fuckyoudeki/AutoGPT/autogpt/memory/milvus.py deleted file mode 100644 index 44aa72b956224fa4c2a16d5f40b0eaeb35e98581..0000000000000000000000000000000000000000 --- a/spaces/fuckyoudeki/AutoGPT/autogpt/memory/milvus.py +++ /dev/null @@ -1,115 +0,0 @@ -""" Milvus memory storage provider.""" -from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections - -from autogpt.memory.base import MemoryProviderSingleton, 
get_ada_embedding - - -class MilvusMemory(MemoryProviderSingleton): - """Milvus memory storage provider.""" - - def __init__(self, cfg) -> None: - """Construct a milvus memory storage connection. - - Args: - cfg (Config): Auto-GPT global config. - """ - # connect to milvus server. - connections.connect(address=cfg.milvus_addr) - fields = [ - FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True), - FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536), - FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535), - ] - - # create collection if not exist and load it. - self.milvus_collection = cfg.milvus_collection - self.schema = CollectionSchema(fields, "auto-gpt memory storage") - self.collection = Collection(self.milvus_collection, self.schema) - # create index if not exist. - if not self.collection.has_index(): - self.collection.release() - self.collection.create_index( - "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, - index_name="embeddings", - ) - self.collection.load() - - def add(self, data) -> str: - """Add an embedding of data into memory. - - Args: - data (str): The raw text to construct embedding index. - - Returns: - str: log. - """ - embedding = get_ada_embedding(data) - result = self.collection.insert([[embedding], [data]]) - _text = ( - "Inserting data into memory at primary key: " - f"{result.primary_keys[0]}:\n data: {data}" - ) - return _text - - def get(self, data): - """Return the most relevant data in memory. - Args: - data: The data to compare to. - """ - return self.get_relevant(data, 1) - - def clear(self) -> str: - """Drop the index in memory. - - Returns: - str: log. - """ - self.collection.drop() - self.collection = Collection(self.milvus_collection, self.schema) - self.collection.create_index( - "embeddings", - { - "metric_type": "IP", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, - }, - index_name="embeddings", - ) - self.collection.load() - return "Obliviated" - - def get_relevant(self, data: str, num_relevant: int = 5): - """Return the top-k relevant data in memory. - Args: - data: The data to compare to. - num_relevant (int, optional): The max number of relevant data. - Defaults to 5. - - Returns: - list: The top-k relevant data. - """ - # search the embedding and return the most relevant text. - embedding = get_ada_embedding(data) - search_params = { - "metrics_type": "IP", - "params": {"nprobe": 8}, - } - result = self.collection.search( - [embedding], - "embeddings", - search_params, - num_relevant, - output_fields=["raw_text"], - ) - return [item.entity.value_of_field("raw_text") for item in result[0]] - - def get_stats(self) -> str: - """ - Returns: The stats of the milvus cache. - """ - return f"Entities num: {self.collection.num_entities}" diff --git a/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_pascal_ctx_59_sem_seg.py b/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_pascal_ctx_59_sem_seg.py deleted file mode 100644 index a4019b3006e8347bdbd936297852c1b2cbd22ff6..0000000000000000000000000000000000000000 --- a/spaces/fun-research/FC-CLIP/fcclip/data/datasets/register_pascal_ctx_59_sem_seg.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import os - -import numpy as np - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets import load_sem_seg - -from . 
import openseg_classes - -PASCAL_CTX_59_CATEGORIES=openseg_classes.get_pascal_ctx_59_categories_with_prompt_eng() - -PASCAL_CTX_59_COLORS = [k["color"] for k in PASCAL_CTX_59_CATEGORIES] - -MetadataCatalog.get("openvocab_pascal_ctx59_sem_seg_train").set( - stuff_colors=PASCAL_CTX_59_COLORS[:], -) - -MetadataCatalog.get("openvocab_pascal_ctx59_sem_seg_val").set( - stuff_colors=PASCAL_CTX_59_COLORS[:], -) - -def _get_ctx59_meta(): - # Id 0 is reserved for ignore_label, we change ignore_label for 0 - # to 255 in our pre-processing, so all ids are shifted by 1. - stuff_ids = [k["id"] for k in PASCAL_CTX_59_CATEGORIES] - assert len(stuff_ids) == 59, len(stuff_ids) - - # For semantic segmentation, this mapping maps from contiguous stuff id - # (in [0, 91], used in models) to ids in the dataset (used for processing results) - stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)} - stuff_classes = [k["name"] for k in PASCAL_CTX_59_CATEGORIES] - - ret = { - "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id, - "stuff_classes": stuff_classes, - } - return ret - - -def register_all_ctx59(root): - root = os.path.join(root, "pascal_ctx_d2") - meta = _get_ctx59_meta() - for name, dirname in [("train", "training"), ("val", "validation")]: - image_dir = os.path.join(root, "images", dirname) - gt_dir = os.path.join(root, "annotations_ctx59", dirname) - name = f"openvocab_pascal_ctx59_sem_seg_{name}" - DatasetCatalog.register( - name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg") - ) - MetadataCatalog.get(name).set( - stuff_classes=meta["stuff_classes"][:], - thing_dataset_id_to_contiguous_id={}, # to make Mask2Former happy - stuff_dataset_id_to_contiguous_id=meta["stuff_dataset_id_to_contiguous_id"], - image_root=image_dir, - sem_seg_root=gt_dir, - evaluator_type="sem_seg", - ignore_label=255, - gt_ext="png", - ) - -_root = os.getenv("DETECTRON2_DATASETS", "datasets") -register_all_ctx59(_root) \ No newline at end of file diff --git a/spaces/golem4300/RVC-TTS/vc_infer_pipeline.py b/spaces/golem4300/RVC-TTS/vc_infer_pipeline.py deleted file mode 100644 index 2e5d17bdc522e3d2757f2accd17258994b40e613..0000000000000000000000000000000000000000 --- a/spaces/golem4300/RVC-TTS/vc_infer_pipeline.py +++ /dev/null @@ -1,230 +0,0 @@ -from scipy import signal -from functools import lru_cache -import torch.nn.functional as F -import numpy as np, parselmouth, torch -import pyworld, os, traceback, faiss, librosa - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period, input_audio_path2wav): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, fs=fs, f0_ceil=f0max, f0_floor=f0min, frame_period=frame_period - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - -def change_rms(data1, sr1, data2, sr2, rate): - rms1 = librosa.feature.rms(y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2) - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1).unsqueeze(0) - rms2 = torch.from_numpy(rms2).unsqueeze(0) - rms1 = F.interpolate(rms1, size=data2.shape[0], mode="linear").squeeze() - rms2 = F.interpolate(rms2, size=data2.shape[0], mode="linear").squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy() - - return data2 - -class VC: - def __init__(self, tgt_sr, config): - self.x_pad = 
config.x_pad - self.x_query = config.x_query - self.x_center = config.x_center - self.x_max = config.x_max - self.is_half = config.is_half - self.sr = 16000 - self.window = 160 - self.t_pad = self.sr * self.x_pad - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query - self.t_center = self.sr * self.x_center - self.t_max = self.sr * self.x_max - self.device = config.device - - def get_f0(self, input_audio_path, x, p_len, f0_up_key, f0_method, filter_radius, inp_f0=None): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - if f0_method == "pm": - f0 = (parselmouth.Sound(x, self.sr) - .to_pitch_ac(time_step=time_step / 1000, voicing_threshold=0.6, pitch_floor=f0_min, - pitch_ceiling=f0_max,) - .selected_array["frequency"]) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0 *= pow(2, f0_up_key / 12) - tf0 = self.sr // self.window - - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp(list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]) - shape = f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - - f0bak= f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak - - def vc(self, model, net_g, sid, audio0, pitch, pitchf, times, index, big_npy, index_rate, version, protect): - feats = torch.from_numpy(audio0) - feats = feats.half() if self.is_half else feats.float() - - if feats.dim() == 2: - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { "source": feats.to(self.device), "padding_mask": padding_mask, "output_layer": 9 if version == "v1" else 12} - - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = feats.clone() - - if index is not None and big_npy is not None and index_rate != 0: - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float64") - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + (1 - index_rate) * feats) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch is not None and pitchf is not None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch is not None and pitchf is not None: 
- pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - - with torch.no_grad(): - if pitch is not None and pitchf is not None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ((net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()) - - del feats, p_len, padding_mask - return audio1 - - def pipeline(self,model, net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, file_index, index_rate, if_f0, filter_radius, tgt_sr, resample_sr, rms_mix_rate, version, protect, f0_file=None,): - if ( - file_index != "" - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float64") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path,audio_pad,p_len,f0_up_key,f0_method,filter_radius,inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc(model, net_g, sid,audio_pad[s : t + self.t_pad2 + self.window], pitch[:, s // self.window : (t + self.t_pad2) // self.window],pitchf[:, s // self.window : (t + self.t_pad2) // self.window],times,index,big_npy,index_rate,version,protect,)[self.t_pad_tgt : -self.t_pad_tgt]) - else: - audio_opt.append(self.vc(model,net_g,sid,audio_pad[s : t + self.t_pad2 + self.window],None,None,times,index,big_npy,index_rate,version,protect)[self.t_pad_tgt : -self.t_pad_tgt]) - s = t - if if_f0 == 1: - audio_opt.append(self.vc(model,net_g,sid,audio_pad[t:],pitch[:, t // self.window :] if t is not None else pitch,pitchf[:, t // self.window :] if t is not None else pitchf,times,index,big_npy,index_rate,version,protect,)[self.t_pad_tgt : -self.t_pad_tgt]) - else: - audio_opt.append(self.vc(model, net_g, sid, audio_pad[t:], None, None, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - audio_opt = np.concatenate(audio_opt) - 
if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Keygen Extra Quality AutoCAD Architecture 2016.md b/spaces/gotiQspiryo/whisper-ui/examples/Keygen Extra Quality AutoCAD Architecture 2016.md deleted file mode 100644 index ae4eb440f041f854ea7b1919afc425b36e7f1376..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Keygen Extra Quality AutoCAD Architecture 2016.md +++ /dev/null @@ -1,115 +0,0 @@ -
-

Keygen AutoCAD Architecture 2016: A Complete Guide

- -

If you are looking for a way to activate any Autodesk 2016 product, you may have heard of Keygen AutoCAD Architecture 2016, a tool that generates serial numbers and product keys matching certain templates for Autodesk products. In this article, we explain what Keygen AutoCAD Architecture 2016 is, how it is used, and what the advantages and disadvantages of using it are.

-

Keygen AutoCAD Architecture 2016


DOWNLOADhttps://urlgoal.com/2uyMya



- -

What is Keygen AutoCAD Architecture 2016?

- -

Keygen AutoCAD Architecture 2016 is a program that generates unique codes to unlock the full features of Autodesk products such as AutoCAD Architecture 2016. AutoCAD Architecture 2016 itself is software that helps architects and designers create drawings, documentation, and schedules for building projects, and it offers many tools and features that improve productivity and accuracy.

- -

However, to use AutoCAD Architecture 2016, you need to have a valid license that can be purchased from Autodesk or authorized resellers. A license can be either perpetual or subscription-based, and it can be either single-user or multi-user. The price of a license depends on the type, duration, and number of users.

- -

If you do not have a license, you can still use AutoCAD Architecture 2016 in trial mode for 30 days. After that, you will need to activate the software with a serial number and a product key. This is where Keygen AutoCAD Architecture 2016 comes in handy. It can generate serial numbers and product keys that can bypass the activation process and make the software think that it is licensed.

-

- -

How to use Keygen AutoCAD Architecture 2016?

- -

To use Keygen AutoCAD Architecture 2016, you need to follow these steps:

- -
    -
  1. Download Keygen AutoCAD Architecture 2016 from various sources on the internet. Be careful to choose a reliable and safe source, as some keygens may contain viruses or malware.
  2. -
  3. Install Autodesk Autocad Architecture 2016 with a serial number and a product key that match certain templates. You can find these templates on the internet or use the ones provided by the keygen.
  4. -
  5. Run the keygen as administrator and copy the request code from the activation screen of AutoCAD Architecture 2016.
  6. -
  7. Paste the request code into the keygen and press generate. The keygen will produce an activation code.
  8. -
  9. Copy the activation code and paste it into the activation screen of AutoCAD Architecture 2016. Click next and you should see a message that says "Activation successful".
  10. -
  11. Block the outgoing traffic or disconnect from the internet before activating. This will prevent Autodesk from detecting that you are using a fake license.
  12. -
- -

Congratulations! You have successfully activated AutoCAD Architecture 2016 with Keygen AutoCAD Architecture 2016.

- -

What are the advantages and disadvantages of using Keygen AutoCAD Architecture 2016?

- -

Using Keygen AutoCAD Architecture 2016 has some pros and cons that you should be aware of before deciding to use it.

- -

The main advantage of using Keygen AutoCAD Architecture 2016 is that it can save you money. You do not have to pay for a license or a subscription to use AutoCAD Architecture 2016. You can enjoy all the features and functions of the software without any limitations or restrictions.

- -

The main disadvantage of using Keygen AutoCAD Architecture 2016 is that it is illegal and unethical. By using pirated software you violate Autodesk's terms and conditions and deprive the company of its rightful revenue, and you may face legal consequences if you are caught using Keygen AutoCAD Architecture 2016.

- -

Another disadvantage of using Keygen AutoCAD Architecture 2016 is that it may not work properly, or at all. Some keygens do not generate valid codes, or generate codes that have already been used by someone else; others cause errors or crashes in the software or your system; and some contain viruses or malware that can harm your computer or steal your personal information.

- -

Conclusion

- -

Keygen AutoCAD Architecture 2016 is a tool that can activate any product of Autodesk 2016, including AutoCAD Architecture 2016. It can generate serial numbers and product keys that can unlock the full features of the software. However, using Keygen AutoCAD Architecture 2016 is illegal and unethical, and it may also pose some risks to your system and security. Therefore, we do not recommend using Keygen AutoCAD Architecture 2016 or any other similar tools. Instead, we suggest that you purchase a legitimate license from Autodesk or authorized resellers.

-

What are the benefits of using AutoCAD Architecture 2016?

- -

AutoCAD Architecture 2016 is powerful software that can help you design and document architectural projects. It has many benefits that can improve your workflow and efficiency, such as:

- -
    -
  • It has a user-friendly interface that allows you to access tools and commands easily.
  • -
  • It has a comprehensive library of architectural objects and styles that you can use to create realistic and accurate drawings.
  • -
  • It has a dynamic model that updates automatically as you make changes to your design.
  • -
  • It has a smart dimensioning system that helps you create accurate and consistent annotations.
  • -
  • It has a collaboration feature that enables you to work with other professionals and share your data across platforms.
  • -
- -

With AutoCAD Architecture 2016, you can create stunning and professional architectural drawings that meet your standards and specifications.

- -

What are the risks of using Keygen AutoCAD Architecture 2016?

- -

While Keygen AutoCAD Architecture 2016 may seem like a convenient and cost-effective way to use AutoCAD Architecture 2016, it also comes with some risks that you should be aware of before using it. Some of these risks are:

- -
    -
  • You may violate the intellectual property rights of Autodesk and face legal actions or penalties.
  • -
  • You may compromise the quality and performance of your software and your system.
  • -
  • You may expose your computer and your data to viruses or malware that can damage or steal them.
  • -
  • You may lose access to updates, support, and services from Autodesk.
  • -
  • You may miss out on new features and improvements that Autodesk releases for their products.
  • -
- -

Using Keygen AutoCAD Architecture 2016 may seem tempting, but it is not worth the risk. You may end up losing more than you gain by using pirated software.

- -

How to get a legitimate license for AutoCAD Architecture 2016?

- -

If you want to use AutoCAD Architecture 2016 legally and safely, you need to get a legitimate license from Autodesk or authorized resellers. You can choose from different types of licenses depending on your needs and preferences, such as:

- -
    -
  • A perpetual license that allows you to use the software indefinitely without paying any additional fees.
  • -
  • A subscription license that allows you to use the software for a specified period of time and pay a recurring fee.
  • -
  • A single-user license that allows you to use the software on one device only.
  • -
  • A multi-user license that allows you to use the software on multiple devices or share it with other users.
  • -
- -

To get a license for AutoCAD Architecture 2016, you need to visit the official website of Autodesk or authorized resellers and follow the instructions. You will need to provide some information, such as your name, email address, country, and payment method. You will also need to agree to the terms and conditions of Autodesk. After completing the purchase, you will receive a confirmation email with your serial number and product key. You can then use these codes to activate your software and enjoy its full features and functions.

-

What are the alternatives to using Keygen AutoCAD Architecture 2016?

- -

If you do not want to use Keygen AutoCAD Architecture 2016 or any other similar tools, you have some alternatives that can help you use AutoCAD Architecture 2016 legally and safely. Some of these alternatives are:

- -
    -
  • You can use the trial version of AutoCAD Architecture 2016 for 30 days. This will allow you to test the software and see if it meets your needs and expectations. You can download the trial version from the official website of Autodesk.
  • -
  • You can use the student version of AutoCAD Architecture 2016 for 3 years. This will allow you to use the software for educational purposes only. You can get the student version from the Autodesk Education Community.
  • -
  • You can use the free web and mobile apps of AutoCAD Architecture 2016. These will allow you to view, edit, and share your drawings online or on your mobile devices. You can access the web app from any browser or download the mobile app from the App Store or Google Play.
  • -
  • You can use other software that are similar to AutoCAD Architecture 2016. These will allow you to create and document architectural projects with different features and functions. You can find some of these software on the internet or ask for recommendations from other professionals.
  • -
- -

These alternatives may not have all the features and functions of AutoCAD Architecture 2016, but they may still help you achieve your goals and objectives.

- -

How to learn more about AutoCAD Architecture 2016?

- -

If you want to learn more about AutoCAD Architecture 2016, you have some resources that can help you improve your skills and knowledge. Some of these resources are:

- -
    -
  • You can read the user guide and the help files of AutoCAD Architecture 2016. These will provide you with detailed information and instructions on how to use the software and its tools and commands.
  • -
  • You can watch the tutorials and videos of AutoCAD Architecture 2016. These will show you how to perform various tasks and operations with the software and its features.
  • -
  • You can take online courses and training programs on AutoCAD Architecture 2016. These will teach you how to use the software effectively and efficiently for different types of projects and scenarios.
  • -
  • You can join online forums and communities of AutoCAD Architecture 2016 users. These will allow you to interact with other professionals and experts who can answer your questions and share their tips and tricks.
  • -
- -

These resources may help you learn more about AutoCAD Architecture 2016 and enhance your performance and productivity.

-

Conclusion

- -

AutoCAD Architecture 2016 is software that can help you design and document architectural projects, with many features and functions that can improve your workflow and accuracy. To use it, however, you need a valid license purchased from Autodesk or an authorized reseller. If you do not have a license, you may be tempted to use Keygen AutoCAD Architecture 2016 or similar tools that activate the software without paying. Using Keygen AutoCAD Architecture 2016 is illegal and unethical, and it may also pose risks to your system and security, so we do not recommend it or any similar tool. Instead, we suggest that you use the alternatives described above to run AutoCAD Architecture 2016 legally and safely, and the resources listed above to learn more about the software and improve your skills.

- -

We hope that this article has provided you with useful information and guidance on Keygen AutoCAD Architecture 2016 and AutoCAD Architecture 2016. If you have any questions or comments, please feel free to contact us or leave a comment below.

-
-
\ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X Updatedl.md b/spaces/gotiQspiryo/whisper-ui/examples/Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X Updatedl.md deleted file mode 100644 index ae872211bbc9015b675be9c6a4f37fcc4efec43e..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X Updatedl.md +++ /dev/null @@ -1,6 +0,0 @@ -

Kunci Jawaban Buku Pr Intan Pariwara Geografi Kelas X | Updatedl


Download ::: https://urlgoal.com/2uyLFu



- -for each class. Subject codes: Bahasa Indonesia (BI), Bahasa Inggris (EN), Mathematics (MT), Biologi (BL), Fisika (FS), Kimia (KM), Ekonomi (EK), Geografi (GF), Sosiologi (SL), Sejarah (SJ).
-
-
-

diff --git a/spaces/gradio/HuBERT/examples/truncated_bptt/truncated_bptt_lm_task.py b/spaces/gradio/HuBERT/examples/truncated_bptt/truncated_bptt_lm_task.py deleted file mode 100644 index 02be0e7fb4213b98798c85b79e9046e9990b97fc..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/truncated_bptt/truncated_bptt_lm_task.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import os -from dataclasses import dataclass, field -from typing import List, Optional, Tuple - -import torch -from fairseq import utils -from fairseq.data import ( - Dictionary, - TokenBlockDataset, - data_utils, - iterators, -) -from fairseq.dataclass import FairseqDataclass -from fairseq.distributed import utils as dist_utils -from fairseq.tasks import FairseqTask, register_task -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class TruncatedBPTTLMConfig(FairseqDataclass): - data: str = field(default="???", metadata={"help": "path to data directory"}) - tokens_per_sample: int = field( - default=1024, - metadata={"help": "max number of tokens per sequence"}, - ) - batch_size: int = II("dataset.batch_size") - # Some models use *max_target_positions* to know how many positional - # embeddings to learn. We use II(...) to make it default to - # *tokens_per_sample*, but in principle there could be more positional - # embeddings than tokens in a single batch. This may also be irrelevant for - # custom model implementations. - max_target_positions: int = II("task.tokens_per_sample") - # these will be populated automatically if not provided - data_parallel_rank: Optional[int] = None - data_parallel_size: Optional[int] = None - - -@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig) -class TruncatedBPTTLMTask(FairseqTask): - def __init__(self, cfg: TruncatedBPTTLMConfig): - super().__init__(cfg) - - if cfg.data_parallel_rank is None or cfg.data_parallel_size is None: - if torch.distributed.is_initialized(): - cfg.data_parallel_rank = dist_utils.get_data_parallel_rank() - cfg.data_parallel_size = dist_utils.get_data_parallel_world_size() - else: - cfg.data_parallel_rank = 0 - cfg.data_parallel_size = 1 - - # load the dictionary - paths = utils.split_paths(cfg.data) - assert len(paths) > 0 - self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) - logger.info("dictionary: {} types".format(len(self.dictionary))) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split (e.g., train, valid, test)""" - - # support sharded datasets - paths = utils.split_paths(self.cfg.data) - assert len(paths) > 0 - data_path = paths[(epoch - 1) % len(paths)] - split_path = os.path.join(data_path, split) - - # each element of *data* will be a tensorized line from the original - # text dataset, similar to ``open(split_path).readlines()`` - data = data_utils.load_indexed_dataset( - split_path, self.dictionary, combine=combine - ) - if data is None: - raise FileNotFoundError( - "Dataset not found: {} ({})".format(split, split_path) - ) - - # this is similar to ``data.view(-1).split(tokens_per_sample)`` - data = TokenBlockDataset( - data, - data.sizes, - block_size=self.cfg.tokens_per_sample, - pad=None, # unused - eos=None, # unused - break_mode="none", - ) - - self.datasets[split] = TruncatedBPTTDataset( - data=data, - 
bsz_per_shard=self.cfg.batch_size, - shard_id=self.cfg.data_parallel_rank, - num_shards=self.cfg.data_parallel_size, - ) - - def dataset(self, split): - return self.datasets[split] - - def get_batch_iterator( - self, dataset, num_workers=0, epoch=1, data_buffer_size=0, **kwargs - ): - return iterators.EpochBatchIterator( - dataset=dataset, - collate_fn=self._collate_fn, - num_workers=num_workers, - epoch=epoch, - buffer_size=data_buffer_size, - # we don't use the batching functionality from EpochBatchIterator; - # instead every item in *dataset* is a whole batch - batch_sampler=[[i] for i in range(len(dataset))], - disable_shuffling=True, - ) - - def _collate_fn(self, items: List[List[torch.Tensor]]): - # we don't use fairseq's batching functionality, so we expect a single - # Tensor of type List[torch.Tensor] - assert len(items) == 1 - - # item will have shape B x T (the last batch may have length < T) - id, item = items[0] - item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad()) - B, T = item.size() - - # shift item one position over and append a padding token for the target - target = torch.nn.functional.pad( - item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad() - ) - - # fairseq expects batches to have the following structure - return { - "id": torch.tensor([id]*item.size(0)), - "net_input": { - "src_tokens": item, - }, - "target": target, - "nsentences": item.size(0), - "ntokens": item.numel(), - } - - def build_dataset_for_inference( - self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs - ) -> torch.utils.data.Dataset: - eos = self.source_dictionary.eos() - dataset = TokenBlockDataset( - src_tokens, - src_lengths, - block_size=None, # ignored for "eos" break mode - pad=self.source_dictionary.pad(), - eos=eos, - break_mode="eos", - ) - - class Dataset(torch.utils.data.Dataset): - def __getitem__(self, i): - item = dataset[i] - if item[-1] == eos: - # remove eos to support generating with a prefix - item = item[:-1] - return (i, [item]) - - def __len__(self): - return len(dataset) - - return Dataset() - - def inference_step( - self, generator, models, sample, prefix_tokens=None, constraints=None - ): - with torch.no_grad(): - if constraints is not None: - raise NotImplementedError - - # SequenceGenerator doesn't use *src_tokens* directly, we need to - # pass the *prefix_tokens* argument instead. 
- if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement(): - prefix_tokens = sample["net_input"]["src_tokens"] - - # begin generation with the end-of-sentence token - bos_token = self.source_dictionary.eos() - - return generator.generate( - models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token - ) - - def eval_lm_dataloader( - self, - dataset, - max_tokens: Optional[int] = 36000, - batch_size: Optional[int] = None, - max_positions: Optional[int] = None, - num_shards: int = 1, - shard_id: int = 0, - num_workers: int = 1, - data_buffer_size: int = 10, - context_window: int = 0, - ): - if context_window > 0: - raise NotImplementedError( - "Transformer-XL doesn't need --context-window, try " - "--model-overrides '{\"mem_len\":42}' instead " - ) - return self.get_batch_iterator( - dataset=dataset, - max_tokens=max_tokens, - max_sentences=batch_size, - max_positions=max_positions, - ignore_invalid_inputs=True, - num_shards=num_shards, - shard_id=shard_id, - num_workers=num_workers, - data_buffer_size=data_buffer_size, - ).next_epoch_itr(shuffle=False) - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary - - -class TruncatedBPTTDataset(torch.utils.data.Dataset): - def __init__( - self, - data: List[torch.Tensor], # ordered list of items - bsz_per_shard, # number of items processed per GPUs per forward - shard_id, # current GPU ID - num_shards, # number of GPUs - ): - super().__init__() - self.data = data - - def batchify(data, bsz): - # Work out how cleanly we can divide the dataset into bsz parts. - nbatch = data.size(0) // bsz - # Trim off any extra elements that wouldn't cleanly fit (remainders). - data = data.narrow(0, 0, nbatch * bsz) - # Evenly divide the data across the bsz batches. 
- data = data.view(bsz, -1).contiguous() - return data - - # total number of sequences processed by all GPUs in each forward pass - global_batch_size = bsz_per_shard * num_shards - - """ - With a 16 item dataset, bsz_per_shard=2 and num_shards=3, - *indices* might look like: - - indices = [[0, 1], - [2, 3], - [4, 5], - [6, 7], - [8, 9], - [10, 11]] - - The size of the TruncatedBPTTDataset instance will be 2, - and shard 1 will see items: - - [(0, [data[4], data[6]]), - (1, [data[5], data[7]])] - """ - indices = batchify(torch.arange(len(data)), global_batch_size) - assert indices.size(0) == global_batch_size - - self.my_indices = indices[ - shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard - ] - assert self.my_indices.size(0) == bsz_per_shard - - def __len__(self): - return self.my_indices.size(1) - - def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]: - return (i, [self.data[idx] for idx in self.my_indices[:, i]]) diff --git a/spaces/gradio/diff_texts/README.md b/spaces/gradio/diff_texts/README.md deleted file mode 100644 index 7448de3efc10360d4881ee7b2cdbb4ed9269d1b7..0000000000000000000000000000000000000000 --- a/spaces/gradio/diff_texts/README.md +++ /dev/null @@ -1,12 +0,0 @@ - ---- -title: diff_texts -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 4.1.2 -app_file: run.py -pinned: false -hf_oauth: true ---- diff --git a/spaces/gradio/longformer/longformer/longformer.py b/spaces/gradio/longformer/longformer/longformer.py deleted file mode 100644 index 14da60f5ef0adc78de3676a90fc49d47ba5ce66c..0000000000000000000000000000000000000000 --- a/spaces/gradio/longformer/longformer/longformer.py +++ /dev/null @@ -1,269 +0,0 @@ -from typing import List -import math -import torch -from torch import nn -import torch.nn.functional as F -from longformer.diagonaled_mm_tvm import diagonaled_mm as diagonaled_mm_tvm, mask_invalid_locations -from longformer.sliding_chunks import sliding_chunks_matmul_qk, sliding_chunks_matmul_pv -from longformer.sliding_chunks import sliding_chunks_no_overlap_matmul_qk, sliding_chunks_no_overlap_matmul_pv -from transformers.modeling_roberta import RobertaConfig, RobertaModel, RobertaForMaskedLM - - -class Longformer(RobertaModel): - def __init__(self, config): - super(Longformer, self).__init__(config) - if config.attention_mode == 'n2': - pass # do nothing, use BertSelfAttention instead - else: - for i, layer in enumerate(self.encoder.layer): - layer.attention.self = LongformerSelfAttention(config, layer_id=i) - - -class LongformerForMaskedLM(RobertaForMaskedLM): - def __init__(self, config): - super(LongformerForMaskedLM, self).__init__(config) - if config.attention_mode == 'n2': - pass # do nothing, use BertSelfAttention instead - else: - for i, layer in enumerate(self.roberta.encoder.layer): - layer.attention.self = LongformerSelfAttention(config, layer_id=i) - - -class LongformerConfig(RobertaConfig): - def __init__(self, attention_window: List[int] = None, attention_dilation: List[int] = None, - autoregressive: bool = False, attention_mode: str = 'sliding_chunks', **kwargs): - """ - Args: - attention_window: list of attention window sizes of length = number of layers. - window size = number of attention locations on each side. - For an affective window size of 512, use `attention_window=[256]*num_layers` - which is 256 on each side. - attention_dilation: list of attention dilation of length = number of layers. - attention dilation of `1` means no dilation. 
- autoregressive: do autoregressive attention or have attention of both sides - attention_mode: 'n2' for regular n^2 self-attention, 'tvm' for TVM implemenation of Longformer - selfattention, 'sliding_chunks' for another implementation of Longformer selfattention - """ - super().__init__(**kwargs) - self.attention_window = attention_window - self.attention_dilation = attention_dilation - self.autoregressive = autoregressive - self.attention_mode = attention_mode - assert self.attention_mode in ['tvm', 'sliding_chunks', 'n2', 'sliding_chunks_no_overlap'] - - -class LongformerSelfAttention(nn.Module): - def __init__(self, config, layer_id): - super(LongformerSelfAttention, self).__init__() - if config.hidden_size % config.num_attention_heads != 0: - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads)) - self.num_heads = config.num_attention_heads - self.head_dim = int(config.hidden_size / config.num_attention_heads) - self.embed_dim = config.hidden_size - - self.query = nn.Linear(config.hidden_size, self.embed_dim) - self.key = nn.Linear(config.hidden_size, self.embed_dim) - self.value = nn.Linear(config.hidden_size, self.embed_dim) - - self.query_global = nn.Linear(config.hidden_size, self.embed_dim) - self.key_global = nn.Linear(config.hidden_size, self.embed_dim) - self.value_global = nn.Linear(config.hidden_size, self.embed_dim) - - self.dropout = config.attention_probs_dropout_prob - - self.layer_id = layer_id - self.attention_window = config.attention_window[self.layer_id] - self.attention_dilation = config.attention_dilation[self.layer_id] - self.attention_mode = config.attention_mode - self.autoregressive = config.autoregressive - assert self.attention_window > 0 - assert self.attention_dilation > 0 - assert self.attention_mode in ['tvm', 'sliding_chunks', 'sliding_chunks_no_overlap'] - if self.attention_mode in ['sliding_chunks', 'sliding_chunks_no_overlap']: - assert not self.autoregressive # not supported - assert self.attention_dilation == 1 # dilation is not supported - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - output_attentions=False, - ): - ''' - The `attention_mask` is changed in `BertModel.forward` from 0, 1, 2 to - -ve: no attention - 0: local attention - +ve: global attention - ''' - assert encoder_hidden_states is None, "`encoder_hidden_states` is not supported and should be None" - assert encoder_attention_mask is None, "`encoder_attention_mask` is not supported and shiould be None" - - if attention_mask is not None: - attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) - key_padding_mask = attention_mask < 0 - extra_attention_mask = attention_mask > 0 - remove_from_windowed_attention_mask = attention_mask != 0 - - num_extra_indices_per_batch = extra_attention_mask.long().sum(dim=1) - max_num_extra_indices_per_batch = num_extra_indices_per_batch.max() - if max_num_extra_indices_per_batch <= 0: - extra_attention_mask = None - else: - # To support the case of variable number of global attention in the rows of a batch, - # we use the following three selection masks to select global attention embeddings - # in a 3d tensor and pad it to `max_num_extra_indices_per_batch` - # 1) selecting embeddings that correspond to global attention - extra_attention_mask_nonzeros = extra_attention_mask.nonzero(as_tuple=True) - zero_to_max_range = torch.arange(0, 
max_num_extra_indices_per_batch, - device=num_extra_indices_per_batch.device) - # mask indicating which values are actually going to be padding - selection_padding_mask = zero_to_max_range < num_extra_indices_per_batch.unsqueeze(dim=-1) - # 2) location of the non-padding values in the selected global attention - selection_padding_mask_nonzeros = selection_padding_mask.nonzero(as_tuple=True) - # 3) location of the padding values in the selected global attention - selection_padding_mask_zeros = (selection_padding_mask == 0).nonzero(as_tuple=True) - else: - remove_from_windowed_attention_mask = None - extra_attention_mask = None - key_padding_mask = None - - hidden_states = hidden_states.transpose(0, 1) - seq_len, bsz, embed_dim = hidden_states.size() - assert embed_dim == self.embed_dim - q = self.query(hidden_states) - k = self.key(hidden_states) - v = self.value(hidden_states) - q /= math.sqrt(self.head_dim) - - q = q.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) - k = k.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) - # attn_weights = (bsz, seq_len, num_heads, window*2+1) - if self.attention_mode == 'tvm': - q = q.float().contiguous() - k = k.float().contiguous() - attn_weights = diagonaled_mm_tvm(q, k, self.attention_window, self.attention_dilation, False, 0, False) - elif self.attention_mode == "sliding_chunks": - attn_weights = sliding_chunks_matmul_qk(q, k, self.attention_window, padding_value=0) - elif self.attention_mode == "sliding_chunks_no_overlap": - attn_weights = sliding_chunks_no_overlap_matmul_qk(q, k, self.attention_window, padding_value=0) - else: - raise False - mask_invalid_locations(attn_weights, self.attention_window, self.attention_dilation, False) - if remove_from_windowed_attention_mask is not None: - # This implementation is fast and takes very little memory because num_heads x hidden_size = 1 - # from (bsz x seq_len) to (bsz x seq_len x num_heads x hidden_size) - remove_from_windowed_attention_mask = remove_from_windowed_attention_mask.unsqueeze(dim=-1).unsqueeze(dim=-1) - # cast to float/half then replace 1's with -inf - float_mask = remove_from_windowed_attention_mask.type_as(q).masked_fill(remove_from_windowed_attention_mask, -10000.0) - repeat_size = 1 if isinstance(self.attention_dilation, int) else len(self.attention_dilation) - float_mask = float_mask.repeat(1, 1, repeat_size, 1) - ones = float_mask.new_ones(size=float_mask.size()) # tensor of ones - # diagonal mask with zeros everywhere and -inf inplace of padding - if self.attention_mode == 'tvm': - d_mask = diagonaled_mm_tvm(ones, float_mask, self.attention_window, self.attention_dilation, False, 0, False) - elif self.attention_mode == "sliding_chunks": - d_mask = sliding_chunks_matmul_qk(ones, float_mask, self.attention_window, padding_value=0) - elif self.attention_mode == "sliding_chunks_no_overlap": - d_mask = sliding_chunks_no_overlap_matmul_qk(ones, float_mask, self.attention_window, padding_value=0) - - attn_weights += d_mask - assert list(attn_weights.size())[:3] == [bsz, seq_len, self.num_heads] - assert attn_weights.size(dim=3) in [self.attention_window * 2 + 1, self.attention_window * 3] - - # the extra attention - if extra_attention_mask is not None: - selected_k = k.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, self.head_dim) - selected_k[selection_padding_mask_nonzeros] = k[extra_attention_mask_nonzeros] - # (bsz, seq_len, num_heads, max_num_extra_indices_per_batch) - selected_attn_weights = torch.einsum('blhd,bshd->blhs', (q, 
selected_k)) - selected_attn_weights[selection_padding_mask_zeros[0], :, :, selection_padding_mask_zeros[1]] = -10000 - # concat to attn_weights - # (bsz, seq_len, num_heads, extra attention count + 2*window+1) - attn_weights = torch.cat((selected_attn_weights, attn_weights), dim=-1) - attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) # use fp32 for numerical stability - if key_padding_mask is not None: - # softmax sometimes inserts NaN if all positions are masked, replace them with 0 - attn_weights_float = torch.masked_fill(attn_weights_float, key_padding_mask.unsqueeze(-1).unsqueeze(-1), 0.0) - attn_weights = attn_weights_float.type_as(attn_weights) - attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) - v = v.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1) - attn = 0 - if extra_attention_mask is not None: - selected_attn_probs = attn_probs.narrow(-1, 0, max_num_extra_indices_per_batch) - selected_v = v.new_zeros(bsz, max_num_extra_indices_per_batch, self.num_heads, self.head_dim) - selected_v[selection_padding_mask_nonzeros] = v[extra_attention_mask_nonzeros] - # use `matmul` because `einsum` crashes sometimes with fp16 - # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) - attn = torch.matmul(selected_attn_probs.transpose(1, 2), selected_v.transpose(1, 2).type_as(selected_attn_probs)).transpose(1, 2) - attn_probs = attn_probs.narrow(-1, max_num_extra_indices_per_batch, attn_probs.size(-1) - max_num_extra_indices_per_batch).contiguous() - - if self.attention_mode == 'tvm': - v = v.float().contiguous() - attn += diagonaled_mm_tvm(attn_probs, v, self.attention_window, self.attention_dilation, True, 0, False) - elif self.attention_mode == "sliding_chunks": - attn += sliding_chunks_matmul_pv(attn_probs, v, self.attention_window) - elif self.attention_mode == "sliding_chunks_no_overlap": - attn += sliding_chunks_no_overlap_matmul_pv(attn_probs, v, self.attention_window) - else: - raise False - - attn = attn.type_as(hidden_states) - assert list(attn.size()) == [bsz, seq_len, self.num_heads, self.head_dim] - attn = attn.transpose(0, 1).reshape(seq_len, bsz, embed_dim).contiguous() - - # For this case, we'll just recompute the attention for these indices - # and overwrite the attn tensor. 
TODO: remove the redundant computation - if extra_attention_mask is not None: - selected_hidden_states = hidden_states.new_zeros(max_num_extra_indices_per_batch, bsz, embed_dim) - selected_hidden_states[selection_padding_mask_nonzeros[::-1]] = hidden_states[extra_attention_mask_nonzeros[::-1]] - - q = self.query_global(selected_hidden_states) - k = self.key_global(hidden_states) - v = self.value_global(hidden_states) - q /= math.sqrt(self.head_dim) - - q = q.contiguous().view(max_num_extra_indices_per_batch, bsz * self.num_heads, self.head_dim).transpose(0, 1) # (bsz*self.num_heads, max_num_extra_indices_per_batch, head_dim) - k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) # bsz * self.num_heads, seq_len, head_dim) - v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) # bsz * self.num_heads, seq_len, head_dim) - attn_weights = torch.bmm(q, k.transpose(1, 2)) - assert list(attn_weights.size()) == [bsz * self.num_heads, max_num_extra_indices_per_batch, seq_len] - - attn_weights = attn_weights.view(bsz, self.num_heads, max_num_extra_indices_per_batch, seq_len) - attn_weights[selection_padding_mask_zeros[0], :, selection_padding_mask_zeros[1], :] = -10000.0 - if key_padding_mask is not None: - attn_weights = attn_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2), - -10000.0, - ) - attn_weights = attn_weights.view(bsz * self.num_heads, max_num_extra_indices_per_batch, seq_len) - attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) # use fp32 for numerical stability - attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training) - selected_attn = torch.bmm(attn_probs, v) - assert list(selected_attn.size()) == [bsz * self.num_heads, max_num_extra_indices_per_batch, self.head_dim] - - selected_attn_4d = selected_attn.view(bsz, self.num_heads, max_num_extra_indices_per_batch, self.head_dim) - nonzero_selected_attn = selected_attn_4d[selection_padding_mask_nonzeros[0], :, selection_padding_mask_nonzeros[1]] - attn[extra_attention_mask_nonzeros[::-1]] = nonzero_selected_attn.view(len(selection_padding_mask_nonzeros[0]), -1).type_as(hidden_states) - - context_layer = attn.transpose(0, 1) - if output_attentions: - if extra_attention_mask is not None: - # With global attention, return global attention probabilities only - # batch_size x num_heads x max_num_global_attention_tokens x sequence_length - # which is the attention weights from tokens with global attention to all tokens - # It doesn't not return local attention - # In case of variable number of global attantion in the rows of a batch, - # attn_weights are padded with -10000.0 attention scores - attn_weights = attn_weights.view(bsz, self.num_heads, max_num_extra_indices_per_batch, seq_len) - else: - # without global attention, return local attention probabilities - # batch_size x num_heads x sequence_length x window_size - # which is the attention weights of every token attending to its neighbours - attn_weights = attn_weights.permute(0, 2, 1, 3) - outputs = (context_layer, attn_weights) if output_attentions else (context_layer,) - return outputs diff --git a/spaces/gwang-kim/DATID-3D/eg3d/training/augment.py b/spaces/gwang-kim/DATID-3D/eg3d/training/augment.py deleted file mode 100644 index 7b00a4ade50459c16e34fa4c132b2cb947cfff28..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/eg3d/training/augment.py +++ /dev/null @@ -1,441 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 
2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -"""Augmentation pipeline from the paper -"Training Generative Adversarial Networks with Limited Data". -Matches the original implementation by Karras et al. at -https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py""" - -import numpy as np -import scipy.signal -import torch -from torch_utils import persistence -from torch_utils import misc -from torch_utils.ops import upfirdn2d -from torch_utils.ops import grid_sample_gradfix -from torch_utils.ops import conv2d_gradfix - -#---------------------------------------------------------------------------- -# Coefficients of various wavelet decomposition low-pass filters. - -wavelets = { - 'haar': [0.7071067811865476, 0.7071067811865476], - 'db1': [0.7071067811865476, 0.7071067811865476], - 'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025], - 'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569], - 'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523], - 'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125], - 'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017], - 'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236], - 'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161], - 'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025], - 'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569], - 'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427], - 'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728], - 'sym6': [0.015404109327027373, 
0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148], - 'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255], - 'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609], -} - -#---------------------------------------------------------------------------- -# Helpers for constructing transformation matrices. - -def matrix(*rows, device=None): - assert all(len(row) == len(rows[0]) for row in rows) - elems = [x for row in rows for x in row] - ref = [x for x in elems if isinstance(x, torch.Tensor)] - if len(ref) == 0: - return misc.constant(np.asarray(rows), device=device) - assert device is None or device == ref[0].device - elems = [x if isinstance(x, torch.Tensor) else misc.constant(x, shape=ref[0].shape, device=ref[0].device) for x in elems] - return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1)) - -def translate2d(tx, ty, **kwargs): - return matrix( - [1, 0, tx], - [0, 1, ty], - [0, 0, 1], - **kwargs) - -def translate3d(tx, ty, tz, **kwargs): - return matrix( - [1, 0, 0, tx], - [0, 1, 0, ty], - [0, 0, 1, tz], - [0, 0, 0, 1], - **kwargs) - -def scale2d(sx, sy, **kwargs): - return matrix( - [sx, 0, 0], - [0, sy, 0], - [0, 0, 1], - **kwargs) - -def scale3d(sx, sy, sz, **kwargs): - return matrix( - [sx, 0, 0, 0], - [0, sy, 0, 0], - [0, 0, sz, 0], - [0, 0, 0, 1], - **kwargs) - -def rotate2d(theta, **kwargs): - return matrix( - [torch.cos(theta), torch.sin(-theta), 0], - [torch.sin(theta), torch.cos(theta), 0], - [0, 0, 1], - **kwargs) - -def rotate3d(v, theta, **kwargs): - vx = v[..., 0]; vy = v[..., 1]; vz = v[..., 2] - s = torch.sin(theta); c = torch.cos(theta); cc = 1 - c - return matrix( - [vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0], - [vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0], - [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0], - [0, 0, 0, 1], - **kwargs) - -def translate2d_inv(tx, ty, **kwargs): - return translate2d(-tx, -ty, **kwargs) - -def scale2d_inv(sx, sy, **kwargs): - return scale2d(1 / sx, 1 / sy, **kwargs) - -def rotate2d_inv(theta, **kwargs): - return rotate2d(-theta, **kwargs) - -#---------------------------------------------------------------------------- -# Versatile image augmentation pipeline from the paper -# "Training Generative Adversarial Networks with Limited Data". -# -# All augmentations are disabled by default; individual augmentations can -# be enabled by setting their probability multipliers to 1. 
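A minimal usage sketch for the pipeline below, assuming a training loop that provides a `device` and a batch of NCHW `images` (channels must be 1, 3, or 6); the multipliers shown follow the common blit + geometric + color configuration and are an assumption, not something prescribed by this file:

    pipe = AugmentPipe(xflip=1, rotate90=1, xint=1,          # pixel blitting
                       scale=1, rotate=1, aniso=1, xfrac=1,  # geometric transforms
                       brightness=1, contrast=1, lumaflip=1,
                       hue=1, saturation=1).to(device)       # color transforms
    pipe.p.copy_(torch.as_tensor(0.2))  # overall augmentation strength in [0, 1]
    images_aug = pipe(images)           # output has the same shape as the input batch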
- -@persistence.persistent_class -class AugmentPipe(torch.nn.Module): - def __init__(self, - xflip=0, rotate90=0, xint=0, xint_max=0.125, - scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125, - brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1, - imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1, - noise=0, cutout=0, noise_std=0.1, cutout_size=0.5, - ): - super().__init__() - self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability. - - # Pixel blitting. - self.xflip = float(xflip) # Probability multiplier for x-flip. - self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations. - self.xint = float(xint) # Probability multiplier for integer translation. - self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions. - - # General geometric transformations. - self.scale = float(scale) # Probability multiplier for isotropic scaling. - self.rotate = float(rotate) # Probability multiplier for arbitrary rotation. - self.aniso = float(aniso) # Probability multiplier for anisotropic scaling. - self.xfrac = float(xfrac) # Probability multiplier for fractional translation. - self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling. - self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle. - self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling. - self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions. - - # Color transformations. - self.brightness = float(brightness) # Probability multiplier for brightness. - self.contrast = float(contrast) # Probability multiplier for contrast. - self.lumaflip = float(lumaflip) # Probability multiplier for luma flip. - self.hue = float(hue) # Probability multiplier for hue rotation. - self.saturation = float(saturation) # Probability multiplier for saturation. - self.brightness_std = float(brightness_std) # Standard deviation of brightness. - self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast. - self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle. - self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation. - - # Image-space filtering. - self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering. - self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands. - self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification. - - # Image-space corruptions. - self.noise = float(noise) # Probability multiplier for additive RGB noise. - self.cutout = float(cutout) # Probability multiplier for cutout. - self.noise_std = float(noise_std) # Standard deviation of additive RGB noise. - self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions. - - # Setup orthogonal lowpass filter for geometric augmentations. - self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6'])) - - # Construct filter bank for image-space filtering. 
- Hz_lo = np.asarray(wavelets['sym2']) # H(z) - Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z) - Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2 - Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2 - Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i) - for i in range(1, Hz_fbank.shape[0]): - Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1] - Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2]) - Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2 - self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32)) - - def forward(self, images, debug_percentile=None): - assert isinstance(images, torch.Tensor) and images.ndim == 4 - batch_size, num_channels, height, width = images.shape - device = images.device - if debug_percentile is not None: - debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device) - - # ------------------------------------- - # Select parameters for pixel blitting. - # ------------------------------------- - - # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in - I_3 = torch.eye(3, device=device) - G_inv = I_3 - - # Apply x-flip with probability (xflip * strength). - if self.xflip > 0: - i = torch.floor(torch.rand([batch_size], device=device) * 2) - i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i)) - if debug_percentile is not None: - i = torch.full_like(i, torch.floor(debug_percentile * 2)) - G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1) - - # Apply 90 degree rotations with probability (rotate90 * strength). - if self.rotate90 > 0: - i = torch.floor(torch.rand([batch_size], device=device) * 4) - i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i)) - if debug_percentile is not None: - i = torch.full_like(i, torch.floor(debug_percentile * 4)) - G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i) - - # Apply integer translation with probability (xint * strength). - if self.xint > 0: - t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max - t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t)) - if debug_percentile is not None: - t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max) - G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height)) - - # -------------------------------------------------------- - # Select parameters for general geometric transformations. - # -------------------------------------------------------- - - # Apply isotropic scaling with probability (scale * strength). - if self.scale > 0: - s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std) - s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s)) - if debug_percentile is not None: - s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std)) - G_inv = G_inv @ scale2d_inv(s, s) - - # Apply pre-rotation with probability p_rot. 
- p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p - if self.rotate > 0: - theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max - theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta)) - if debug_percentile is not None: - theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max) - G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling. - - # Apply anisotropic scaling with probability (aniso * strength). - if self.aniso > 0: - s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std) - s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s)) - if debug_percentile is not None: - s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std)) - G_inv = G_inv @ scale2d_inv(s, 1 / s) - - # Apply post-rotation with probability p_rot. - if self.rotate > 0: - theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max - theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta)) - if debug_percentile is not None: - theta = torch.zeros_like(theta) - G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling. - - # Apply fractional translation with probability (xfrac * strength). - if self.xfrac > 0: - t = torch.randn([batch_size, 2], device=device) * self.xfrac_std - t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t)) - if debug_percentile is not None: - t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std) - G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height) - - # ---------------------------------- - # Execute geometric transformations. - # ---------------------------------- - - # Execute if the transform is not identity. - if G_inv is not I_3: - - # Calculate padding. - cx = (width - 1) / 2 - cy = (height - 1) / 2 - cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz] - cp = G_inv @ cp.t() # [batch, xyz, idx] - Hz_pad = self.Hz_geom.shape[0] // 4 - margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx] - margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1] - margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device) - margin = margin.max(misc.constant([0, 0] * 2, device=device)) - margin = margin.min(misc.constant([width-1, height-1] * 2, device=device)) - mx0, my0, mx1, my1 = margin.ceil().to(torch.int32) - - # Pad image and adjust origin. - images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect') - G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv - - # Upsample. - images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2) - G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device) - G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device) - - # Execute transformation. - shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2] - G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device) - grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False) - images = grid_sample_gradfix.grid_sample(images, grid) - - # Downsample and crop. 
- images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True) - - # -------------------------------------------- - # Select parameters for color transformations. - # -------------------------------------------- - - # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out - I_4 = torch.eye(4, device=device) - C = I_4 - - # Apply brightness with probability (brightness * strength). - if self.brightness > 0: - b = torch.randn([batch_size], device=device) * self.brightness_std - b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b)) - if debug_percentile is not None: - b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std) - C = translate3d(b, b, b) @ C - - # Apply contrast with probability (contrast * strength). - if self.contrast > 0: - c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std) - c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c)) - if debug_percentile is not None: - c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std)) - C = scale3d(c, c, c) @ C - - # Apply luma flip with probability (lumaflip * strength). - v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis. - if self.lumaflip > 0: - i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2) - i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i)) - if debug_percentile is not None: - i = torch.full_like(i, torch.floor(debug_percentile * 2)) - C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection. - - # Apply hue rotation with probability (hue * strength). - if self.hue > 0 and num_channels > 1: - theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max - theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta)) - if debug_percentile is not None: - theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max) - C = rotate3d(v, theta) @ C # Rotate around v. - - # Apply saturation with probability (saturation * strength). - if self.saturation > 0 and num_channels > 1: - s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std) - s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s)) - if debug_percentile is not None: - s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std)) - C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C - - # ------------------------------ - # Execute color transformations. - # ------------------------------ - - # Execute if the transform is not identity. - if C is not I_4: - images = images.reshape([batch_size, num_channels, height * width]) - if num_channels == 3: - images = C[:, :3, :3] @ images + C[:, :3, 3:] - elif num_channels == 1: - C = C[:, :3, :].mean(dim=1, keepdims=True) - images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:] - elif num_channels == 6: - images[:, :3] = C[:, :3, :3] @ images[:, :3] + C[:, :3, 3:] - images[:, 3:] = C[:, :3, :3] @ images[:, 3:] + C[:, :3, 3:] - else: - raise ValueError('Image must be RGB (3 channels) or L (1 channel)') - images = images.reshape([batch_size, num_channels, height, width]) - - # ---------------------- - # Image-space filtering. 
- # ---------------------- - - if self.imgfilter > 0: - num_bands = self.Hz_fbank.shape[0] - assert len(self.imgfilter_bands) == num_bands - expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f). - - # Apply amplification for each band with probability (imgfilter * strength * band_strength). - g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity). - for i, band_strength in enumerate(self.imgfilter_bands): - t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std) - t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i)) - if debug_percentile is not None: - t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i) - t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector. - t[:, i] = t_i # Replace i'th element. - t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power. - g = g * t # Accumulate into global gain. - - # Construct combined amplification filter. - Hz_prime = g @ self.Hz_fbank # [batch, tap] - Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap] - Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap] - - # Apply filter. - p = self.Hz_fbank.shape[1] // 2 - images = images.reshape([1, batch_size * num_channels, height, width]) - images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect') - images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels) - images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels) - images = images.reshape([batch_size, num_channels, height, width]) - - # ------------------------ - # Image-space corruptions. - # ------------------------ - - # Apply additive RGB noise with probability (noise * strength). - if self.noise > 0: - sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std - sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma)) - if debug_percentile is not None: - sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std) - images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma - - # Apply cutout with probability (cutout * strength). 
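# (Editorial note, not part of the original file.) In the cutout block below,
# pixel coordinates are normalized to [0, 1]; a pixel is kept (mask = 1) when its
# distance from the randomly drawn center exceeds size/2 along either axis, so
# exactly one axis-aligned rectangle with relative side cutout_size is zeroed per image.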
- if self.cutout > 0: - size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device) - size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size)) - center = torch.rand([batch_size, 2, 1, 1, 1], device=device) - if debug_percentile is not None: - size = torch.full_like(size, self.cutout_size) - center = torch.full_like(center, debug_percentile) - coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1]) - coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1]) - mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2) - mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2) - mask = torch.logical_or(mask_x, mask_y).to(torch.float32) - images = images * mask - - return images - -#---------------------------------------------------------------------------- diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/criteria/__init__.py b/spaces/gyugnsu/DragGan-Inversion/PTI/criteria/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/gyugnsu/DragGan-Inversion/viz/capture_widget.py b/spaces/gyugnsu/DragGan-Inversion/viz/capture_widget.py deleted file mode 100644 index 79cc4f80c5bba2cf1e67593e85fb85cd7963ed89..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/viz/capture_widget.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import re -import numpy as np -import imgui -import PIL.Image -from gui_utils import imgui_utils -from . 
import renderer -import torch -import torchvision - -# ---------------------------------------------------------------------------- - - -class CaptureWidget: - def __init__(self, viz): - self.viz = viz - self.path = os.path.abspath(os.path.join( - os.path.dirname(__file__), '..', '_screenshots')) - self.dump_image = False - self.dump_gui = False - self.defer_frames = 0 - self.disabled_time = 0 - - def dump_png(self, image): - viz = self.viz - try: - _height, _width, channels = image.shape - print(viz.result) - assert image.dtype == np.uint8 - os.makedirs(self.path, exist_ok=True) - file_id = 0 - for entry in os.scandir(self.path): - if entry.is_file(): - match = re.fullmatch(r'(\d+).*', entry.name) - if match: - file_id = max(file_id, int(match.group(1)) + 1) - if channels == 1: - pil_image = PIL.Image.fromarray(image[:, :, 0], 'L') - else: - pil_image = PIL.Image.fromarray(image[:, :, :3], 'RGB') - pil_image.save(os.path.join(self.path, f'{file_id:05d}.png')) - np.save(os.path.join( - self.path, f'{file_id:05d}.npy'), viz.result.w) - except: - viz.result.error = renderer.CapturedException() - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - if show: - with imgui_utils.grayed_out(self.disabled_time != 0): - imgui.text('Capture') - imgui.same_line(viz.label_w) - - _changed, self.path = imgui_utils.input_text('##path', self.path, 1024, - flags=( - imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE), - width=(-1), - help_text='PATH') - if imgui.is_item_hovered() and not imgui.is_item_active() and self.path != '': - imgui.set_tooltip(self.path) - imgui.text(' ') - imgui.same_line(viz.label_w) - if imgui_utils.button('Save image', width=viz.button_w, enabled=(self.disabled_time == 0 and 'image' in viz.result)): - self.dump_image = True - self.defer_frames = 2 - self.disabled_time = 0.5 - imgui.same_line() - if imgui_utils.button('Save GUI', width=viz.button_w, enabled=(self.disabled_time == 0)): - self.dump_gui = True - self.defer_frames = 2 - self.disabled_time = 0.5 - - self.disabled_time = max(self.disabled_time - viz.frame_delta, 0) - if self.defer_frames > 0: - self.defer_frames -= 1 - elif self.dump_image: - if 'image' in viz.result: - self.dump_png(viz.result.image) - self.dump_image = False - elif self.dump_gui: - viz.capture_next_frame() - self.dump_gui = False - captured_frame = viz.pop_captured_frame() - if captured_frame is not None: - self.dump_png(captured_frame) - -# ---------------------------------------------------------------------------- diff --git a/spaces/h2oai/wave-tour/examples/graphics_hilbert.py b/spaces/h2oai/wave-tour/examples/graphics_hilbert.py deleted file mode 100644 index bc8d907e744698012bc82c96dbde34dbf7dffa4c..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/graphics_hilbert.py +++ /dev/null @@ -1,54 +0,0 @@ -# Graphics / Hilbert -# Use turtle #graphics recursively to draw Hilbert curves. 
-# --- -from h2o_wave import ui, main, app, Q, graphics as g - - -def hilbert(t: g.Turtle, width: float, depth: int, reverse=False): # recursive - angle = -90 if reverse else 90 - - if depth == 0: - t.f(width).r(angle).f(width).r(angle).f(width) - return - - side = width * ((2 ** depth) - 1) / float((2 ** (depth + 1)) - 1) - edge = width - 2 * side - - t.r(angle) - hilbert(t, side, depth - 1, not reverse) - t.r(angle).f(edge) - hilbert(t, side, depth - 1, reverse) - t.l(angle).f(edge).l(angle) - hilbert(t, side, depth - 1, reverse) - t.f(edge).r(angle) - hilbert(t, side, depth - 1, not reverse) - t.r(angle) - - -def make_hilbert_curve(width: float, depth: int): - t = g.turtle().f(0).pd() - hilbert(t, width, depth) - return t.d() - - -@app('/demo') -async def serve(q: Q): - hilbert_curve = make_hilbert_curve(300, q.args.depth or 5) - - if not q.client.initialized: - q.page['curve'] = ui.graphics_card( - box='1 1 4 6', view_box='0 0 300 300', width='100%', height='100%', - scene=g.scene( - hilbert_curve=g.path(d=hilbert_curve, fill='none', stroke='#333') - ), - ) - q.page['form'] = ui.form_card( - box='1 7 4 1', items=[ - ui.slider(name='depth', label='Play with this Hilbert curve!', min=1, max=6, value=5, trigger=True), - ], - ) - q.client.initialized = True - else: - g.draw(q.page['curve'].scene.hilbert_curve, d=hilbert_curve) - - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/layout_size.py b/spaces/h2oai/wave-tour/examples/layout_size.py deleted file mode 100644 index 75acd8e0c570091e45330eee50008631521f627f..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/layout_size.py +++ /dev/null @@ -1,39 +0,0 @@ -# Layout / Size -# How to adjust the size of cards on a page. #layout -# --- - -from h2o_wave import site, ui - -# Every page has a grid system in place. -# The grid has 12 columns and 10 rows. -# A column is 134 pixels wide. -# A row is 76 pixels high. -# The gap between rows and columns is set to 15 pixels. - -# Cards have a `box` attribute that specifies its column, row, width and height. -# box = 'column row width height' -# They indicate the 1-based column/row to position the top-left corner of the card. - -# In this example, we place multiple cards on a page to demonstrate their `box` values. - -page = site['/demo'] -boxes = [ - '1 1 1 1', - '2 1 2 1', - '4 1 3 1', - '7 1 4 1', - '11 1 2 2', - '1 2 1 9', - '2 2 1 4', - '3 2 1 2', - '2 6 1 5', - '3 4 1 7', - '4 2 7 9', - '11 9 2 2', - '11 3 2 6', -] - -for box in boxes: - page[f'card_{box.replace(" ", "_")}'] = ui.markdown_card(box=box, title=box, content='') - -page.save() diff --git a/spaces/hamacojr/SAM-CAT-Seg/datasets/README.md b/spaces/hamacojr/SAM-CAT-Seg/datasets/README.md deleted file mode 100644 index db2642a9b39eab0d02857ac2dafb15b4658e7cad..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/SAM-CAT-Seg/datasets/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# Prepare Datasets for CAT-Seg - -A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog) -for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc). -This document explains how to setup the builtin datasets so they can be used by the above APIs. -[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`, -and how to add new datasets to them. 
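As a rough editorial illustration of the catalog API mentioned above (this snippet is not part of the original README; the dataset name, paths, and classes are hypothetical), registering a custom dataset with detectron2 generally looks like:

```python
# Sketch only: names and paths below are placeholders.
from detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_semseg_dicts():
    # Return a list of dicts in detectron2's standard dataset format.
    return [{
        "file_name": "datasets/my_data/images/0001.jpg",
        "sem_seg_file_name": "datasets/my_data/annotations_detectron2/0001.png",
        "height": 512,
        "width": 512,
    }]

DatasetCatalog.register("my_semseg_train", load_my_semseg_dicts)
MetadataCatalog.get("my_semseg_train").set(
    stuff_classes=["background", "object"],
    ignore_label=255,
    evaluator_type="sem_seg",
)
```

Once registered, the dataset name can be referenced from configs in the same way as the builtin datasets described below.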
- -CAT-Seg has builtin support for a few datasets. -The datasets are assumed to exist in a directory specified by the environment variable -`DETECTRON2_DATASETS`. -Under this directory, detectron2 will look for datasets in the structure described below, if needed. -``` -$DETECTRON2_DATASETS/ - coco/ # COCO-Stuff - ADEChallengeData2016/ # ADE20K-150 - ADE20K_2021_17_01/ # ADE20K-847 - VOCdevkit/ - VOC2010/ # PASCAL Context - VOC2012/ # PASCAL VOC -``` - -You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`. -If left unset, the default is `./datasets` relative to your current working directory. - -## Prepare data for [COCO-Stuff](https://github.com/nightrome/cocostuff): - -### Expected data structure - -``` -coco-stuff/ - annotations/ - train2017/ - val2017/ - images/ - train2017/ - val2017/ - # below are generated by prepare_coco_stuff.py - annotations_detectron2/ - train2017/ - val2017/ -``` -Download the COCO (2017) images from https://cocodataset.org/ - -```bash -wget http://images.cocodataset.org/zips/train2017.zip -wget http://images.cocodataset.org/zips/val2017.zip -``` - -Download the COCO-Stuff annotation from https://github.com/nightrome/cocostuff. -```bash -wget http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip -``` -Unzip `train2017.zip`, `val2017.zip`, and `stuffthingmaps_trainval2017.zip`. Then put them to the correct location listed above. - -Generate the labels for training and testing. - -``` -python datasets/prepare_coco_stuff.py -``` - - - -## Prepare data for [ADE20K-150](http://sceneparsing.csail.mit.edu): - -### Expected data structure -``` -ADEChallengeData2016/ - annotations/ - validation/ - images/ - validation/ - # below are generated by prepare_ade20k_150.py - annotations_detectron2/ - validation/ -``` -Download the data of ADE20K-150 from http://sceneparsing.csail.mit.edu. -``` -wget http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip -``` -Unzip `ADEChallengeData2016.zip` and generate the labels for testing. -``` -python datasets/prepare_ade20k_150.py -``` -## Prepare data for [ADE20k-847](https://groups.csail.mit.edu/vision/datasets/ADE20K/): - -### Expected data structure -``` -ADE20K_2021_17_01/ - images/ - ADE/ - validation/ - index_ade20k.mat - index_ade20k.pkl - # below are generated by prepare_ade20k_847.py - annotations_detectron2/ - validation/ -``` -Download the data of ADE20k-Full from https://groups.csail.mit.edu/vision/datasets/ADE20K/request_data/ -Unzip the dataset and generate the labels for testing. -``` -python datasets/prepare_ade20k_847.py -``` - -## Prepare data for [PASCAL VOC 2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#devkit): - - -### Expected data structure -``` -VOCdevkit/ - VOC2012/ - Annotations/ - ImageSets/ - JPEGImages/ - SegmentationClass/ - SegmentationClassAug/ - SegmentationObject/ - # below are generated by prepare_voc.py - annotations_detectron2 - annotations_detectron2_bg - -``` -Download the data of PASCAL VOC from http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#devkit. - -We use SBD augmentated training data as SegmentationClassAug following [Deeplab](https://github.com/kazuto1011/deeplab-pytorch/blob/master/data/datasets/voc12/README.md). -``` -wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar -wget https://www.dropbox.com/s/oeu149j8qtbs1x0/SegmentationClassAug.zip -``` -Unzip `VOCtrainval_11-May-2012.tar` and `SegmentationClassAug.zip`. 
Then put them to the correct location listed above and generate the labels for testing. -``` -python datasets/prepare_voc.py -``` - - -## Prepare data for [PASCAL Context](https://www.cs.stanford.edu/~roozbeh/pascal-context/): - - -### Expected data structure -``` -VOCdevkit/ - VOC2010/ - Annotations/ - ImageSets/ - JPEGImages/ - SegmentationClass/ - SegmentationObject/ - trainval/ - labels.txt - 59_labels.txt - pascalcontext_val.txt - # below are generated by prepare_pascal_context.py - annotations_detectron2/ - pc459_val - pc59_val -``` -Download the data of PASCAL VOC 2010 from https://www.cs.stanford.edu/~roozbeh/pascal-context/. - -``` -wget http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar -wget https://www.cs.stanford.edu/~roozbeh/pascal-context/trainval.tar.gz -wget https://www.cs.stanford.edu/~roozbeh/pascal-context/59_labels.txt -``` -Unzip `VOCtrainval_03-May-2010.tar` and `trainval.tar.gz`. Then put them to the correct location listed above and generate the labels for testing. -``` -python datasets/prepare_pascal_context.py -``` \ No newline at end of file diff --git a/spaces/hamelcubsfan/AutoGPT/Dockerfile b/spaces/hamelcubsfan/AutoGPT/Dockerfile deleted file mode 100644 index 8396154998f32a50d55c199a674b638d5cf7bda2..0000000000000000000000000000000000000000 --- a/spaces/hamelcubsfan/AutoGPT/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Use an official Python base image from the Docker Hub -FROM python:3.10-slim - -# Install git -RUN apt-get -y update -RUN apt-get -y install git chromium-driver - -# Install Xvfb and other dependencies for headless browser testing -RUN apt-get update \ - && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates - -# Install Firefox / Chromium -RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ - && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ - && apt-get update \ - && apt-get install -y chromium firefox-esr - -# Set environment variables -ENV PIP_NO_CACHE_DIR=yes \ - PYTHONUNBUFFERED=1 \ - PYTHONDONTWRITEBYTECODE=1 - -# Create a non-root user and set permissions -RUN useradd --create-home appuser -WORKDIR /home/appuser -RUN chown appuser:appuser /home/appuser -USER appuser - -# Copy the requirements.txt file and install the requirements -COPY --chown=appuser:appuser requirements.txt . 
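# (Editorial note, not part of the original Dockerfile.) The sed command below
# truncates requirements.txt at the marker line, so everything listed after
# "Items below this point will not be included in the Docker Image" is skipped
# before pip install runs.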
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ - pip install --no-cache-dir --user -r requirements.txt - -# Copy the application files -COPY --chown=appuser:appuser autogpt/ ./autogpt - -# Set the entrypoint -ENTRYPOINT ["python", "-m", "autogpt"] diff --git a/spaces/hamelcubsfan/AutoGPT/autogpt/memory/weaviate.py b/spaces/hamelcubsfan/AutoGPT/autogpt/memory/weaviate.py deleted file mode 100644 index 5408e9a97aa3594ad443448cfc31f2546a01eb09..0000000000000000000000000000000000000000 --- a/spaces/hamelcubsfan/AutoGPT/autogpt/memory/weaviate.py +++ /dev/null @@ -1,127 +0,0 @@ -import uuid - -import weaviate -from weaviate import Client -from weaviate.embedded import EmbeddedOptions -from weaviate.util import generate_uuid5 - -from autogpt.config import Config -from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding - - -def default_schema(weaviate_index): - return { - "class": weaviate_index, - "properties": [ - { - "name": "raw_text", - "dataType": ["text"], - "description": "original text for the embedding", - } - ], - } - - -class WeaviateMemory(MemoryProviderSingleton): - def __init__(self, cfg): - auth_credentials = self._build_auth_credentials(cfg) - - url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}" - - if cfg.use_weaviate_embedded: - self.client = Client( - embedded_options=EmbeddedOptions( - hostname=cfg.weaviate_host, - port=int(cfg.weaviate_port), - persistence_data_path=cfg.weaviate_embedded_path, - ) - ) - - print( - f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}" - ) - else: - self.client = Client(url, auth_client_secret=auth_credentials) - - self.index = WeaviateMemory.format_classname(cfg.memory_index) - self._create_schema() - - @staticmethod - def format_classname(index): - # weaviate uses capitalised index names - # The python client uses the following code to format - # index names before the corresponding class is created - if len(index) == 1: - return index.capitalize() - return index[0].capitalize() + index[1:] - - def _create_schema(self): - schema = default_schema(self.index) - if not self.client.schema.contains(schema): - self.client.schema.create_class(schema) - - def _build_auth_credentials(self, cfg): - if cfg.weaviate_username and cfg.weaviate_password: - return weaviate.AuthClientPassword( - cfg.weaviate_username, cfg.weaviate_password - ) - if cfg.weaviate_api_key: - return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key) - else: - return None - - def add(self, data): - vector = get_ada_embedding(data) - - doc_uuid = generate_uuid5(data, self.index) - data_object = {"raw_text": data} - - with self.client.batch as batch: - batch.add_data_object( - uuid=doc_uuid, - data_object=data_object, - class_name=self.index, - vector=vector, - ) - - return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}" - - def get(self, data): - return self.get_relevant(data, 1) - - def clear(self): - self.client.schema.delete_all() - - # weaviate does not yet have a neat way to just remove the items in an index - # without removing the entire schema, therefore we need to re-create it - # after a call to delete_all - self._create_schema() - - return "Obliterated" - - def get_relevant(self, data, num_relevant=5): - query_embedding = get_ada_embedding(data) - try: - results = ( - self.client.query.get(self.index, ["raw_text"]) - .with_near_vector({"vector": query_embedding, "certainty": 0.7}) - .with_limit(num_relevant) - .do() - 
) - - if len(results["data"]["Get"][self.index]) > 0: - return [ - str(item["raw_text"]) for item in results["data"]["Get"][self.index] - ] - else: - return [] - - except Exception as err: - print(f"Unexpected error {err=}, {type(err)=}") - return [] - - def get_stats(self): - result = self.client.query.aggregate(self.index).with_meta_count().do() - class_data = result["data"]["Aggregate"][self.index] - - return class_data[0]["meta"] if class_data else {} diff --git a/spaces/hamzapehlivan/StyleRes/options/editing_options/template.py b/spaces/hamzapehlivan/StyleRes/options/editing_options/template.py deleted file mode 100644 index 179eb7b7220e21cda71de69a500f852a7b399bce..0000000000000000000000000000000000000000 --- a/spaces/hamzapehlivan/StyleRes/options/editing_options/template.py +++ /dev/null @@ -1,10 +0,0 @@ - -# This config saves inversion and smile edit results - -edit_configs = [ - dict( method='inversion'), - dict( method='interfacegan', edit='smile', strength=2), - dict( method='ganspace', edit='overexposed', strength=5), - dict( method='styleclip', type='mapper', edit='purple_hair', strength=0.08), - # dict( method='styleclip', type='global', edit='curly_hair', strength=3, disentanglement=0.29), -] diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/build.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/build.py deleted file mode 100644 index 8c0b96b0cabc250d622daff128a2776a819c0d5e..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/data/build.py +++ /dev/null @@ -1,489 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import bisect -import copy -import logging -import os - -import torch.utils.data -import torch.distributed as dist -from maskrcnn_benchmark.utils.comm import get_world_size -from maskrcnn_benchmark.utils.imports import import_file - -from . import datasets as D -from . import samplers - -from .collate_batch import BatchCollator, BBoxAugCollator -from .transforms import build_transforms - -from transformers import AutoTokenizer -from .datasets.duplicate_dataset import create_duplicate_dataset - -def build_dataset(cfg, dataset_list, transforms, dataset_catalog, is_train=True, class_concat=False, extra_args={}): - """ - Arguments: - dataset_list (list[str]): Contains the names of the datasets, i.e., - coco_2014_trian, coco_2014_val, etc - transforms (callable): transforms to apply to each (image, target) sample - dataset_catalog (DatasetCatalog): contains the information on how to - construct a dataset. 
- is_train (bool): whether to setup the dataset for training or testing - """ - if not isinstance(dataset_list, (list, tuple)): - raise RuntimeError( - "dataset_list should be a list of strings, got {}".format(dataset_list) - ) - datasets = [] - num_category = 1 - for dataset_id, dataset_name in enumerate(dataset_list, 1): - if is_train: - dataset_name = dataset_name + cfg.DATASETS.TRAIN_DATASETNAME_SUFFIX - else: - dataset_name = dataset_name + cfg.DATASETS.TEST_DATASETNAME_SUFFIX - data = dataset_catalog.get(dataset_name) - factory = getattr(D, data["factory"]) - args = data["args"] - # for COCODataset, we want to remove images without annotations - # during training - if data["factory"] == "COCODataset": - args["remove_images_without_annotations"] = is_train - - if data["factory"] == "PascalVOCDataset": - args["use_difficult"] = not is_train - if data["factory"] in ["VGTSVDataset", "CocoDetectionTSV", "ODTSVDataset"]: - args["extra_fields"] = ["class"] - if cfg.MODEL.MASK_ON: - args["extra_fields"].append("mask") - - if data["factory"] in ["CocoGrounding", "CocoDetectionTSV", "CaptionTSV", "MixedDataset", "FlickrDataset", "RefExpDataset", "GQADataset", "PseudoData", "PhrasecutDetection"]: - # args["return_masks"] = False - args["return_masks"] = cfg.MODEL.MASK_ON - args["return_tokens"] = True - args["max_num_labels"] = cfg.TEST.MDETR_STYLE_AGGREGATE_CLASS_NUM - args["max_query_len"] = cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN - - args["transforms"] = transforms - args.update(extra_args) - - if dataset_name == "flickr30k_train": - copy = cfg.DATASETS.FLICKR_COPY - elif dataset_name in ["mixed_train", "mixed_train_no_coco"]: - copy = cfg.DATASETS.MIXED_COPY - elif dataset_name == "COCO_odinw_train_8copy_dt_train": - copy = cfg.DATASETS.COCO_COPY - elif dataset_name == "LVIS_odinw_train_8copy_dt_train": - copy = cfg.DATASETS.LVIS_COPY - elif dataset_name == "object365_odinw_2copy_dt_train": - copy = cfg.DATASETS.OBJECT365_COPY - elif dataset_name == "vg_odinw_clipped_8copy_dt_train": - copy = cfg.DATASETS.VG_COPY - elif dataset_name == "vg_vgoi6_clipped_8copy_dt_train": - copy = cfg.DATASETS.VG_COPY - elif dataset_name == "imagenetod_train_odinw_2copy_dt": - copy = cfg.DATASETS.IN_COPY - elif dataset_name == "oi_train_odinw_dt": - copy = cfg.DATASETS.OI_COPY - elif is_train: - copy = cfg.DATASETS.GENERAL_COPY - elif not is_train: - copy = cfg.DATASETS.GENERAL_COPY_TEST - else: - copy = -1 # do not ever copy test - - if copy != -1: - new_factory = create_duplicate_dataset(factory) - dataset = new_factory(copy=copy, **args) - else: - # make dataset from factory - dataset = factory(**args) - - print(dataset_name, 'has the {} data points'.format(len(dataset)), data["factory"]) - - if class_concat: - category = list(dataset.contiguous_category_id_to_json_id.values()) - dataset.contiguous_category_id_to_json_id = {} - dataset.json_category_id_to_contiguous_id = {} - for id, cat in enumerate(category, start=num_category): - dataset.json_category_id_to_contiguous_id[cat] = id - dataset.contiguous_category_id_to_json_id[id] = cat - num_category += len(category) - print("Found {} #category after group {}, concating ...".format(num_category, dataset_id)) - datasets.append(dataset) - - # for testing, return a list of datasets - if not is_train: - return datasets - - # for training, concatenate all datasets into a single one - dataset = datasets[0] - if len(datasets) > 1: - dataset = D.ConcatDataset(datasets) - - return [dataset] - - -def build_dataset_by_group(dataset_list, transforms, 
dataset_catalog, is_train=True, class_by_group=True, - class_concat=False, extra_args={}): - """ - Arguments: - dataset_list (list[str]): Contains the names of the datasets, i.e., - coco_2014_trian, coco_2014_val, etc - transforms (callable): transforms to apply to each (image, target) sample - dataset_catalog (DatasetCatalog): contains the information on how to - construct a dataset. - is_train (bool): whether to setup the dataset for training or testing - """ - if not isinstance(dataset_list, (list, tuple)): - raise RuntimeError( - "dataset_list should be a list of strings, got {}".format(dataset_list) - ) - - num_category = 1 - grouped_datasets = [] - for group_id, group in enumerate(dataset_list, 1): - datasets = [] - for dataset_name in group: - data = dataset_catalog.get(dataset_name) - factory = getattr(D, data["factory"]) - args = data["args"] - # for COCODataset, we want to remove images without annotations - # during training - if data["factory"] == "COCODataset": - args["remove_images_without_annotations"] = is_train - if data["factory"] == "PascalVOCDataset": - args["use_difficult"] = not is_train - args["transforms"] = transforms - args.update(extra_args) - # make dataset from factory - dataset = factory(**args) - - # check if dataset is grouped by task, assume one class per task - if class_by_group and data["factory"] != "Background": - category = dataset.contiguous_category_id_to_json_id[1] - del dataset.contiguous_category_id_to_json_id[1] - dataset.json_category_id_to_contiguous_id[category] = group_id - dataset.contiguous_category_id_to_json_id[group_id] = category - - datasets.append(dataset) - - if class_concat: - for dataset in datasets: - category = list(dataset.contiguous_category_id_to_json_id.values()) - dataset.contiguous_category_id_to_json_id = {} - dataset.json_category_id_to_contiguous_id = {} - for id, cat in enumerate(category, start=num_category): - dataset.json_category_id_to_contiguous_id[cat] = id - dataset.contiguous_category_id_to_json_id[id] = cat - num_category += len(category) - print("Found {} #category after group {}, concating ...".format(num_category, group_id)) - - if is_train: - datasets = D.ConcatDataset(datasets) - - grouped_datasets.append(datasets) - - # for testing, return a list of datasets - if not is_train: - datasets = [dataset for group in grouped_datasets for dataset in group] - return datasets - if class_concat: - grouped_datasets = D.ConcatDataset(grouped_datasets) - return [grouped_datasets] - - # for training, concatenate all datasets into a single one - return grouped_datasets - - -def make_data_sampler(dataset, shuffle, distributed, num_replicas=None, rank=None, use_random_seed=True): - if distributed: - return samplers.DistributedSampler(dataset, shuffle=shuffle, num_replicas=num_replicas, rank=rank, - use_random=use_random_seed) - if shuffle: - sampler = torch.utils.data.sampler.RandomSampler(dataset) - else: - sampler = torch.utils.data.sampler.SequentialSampler(dataset) - return sampler - - -def _quantize(x, bins): - bins = copy.copy(bins) - bins = sorted(bins) - quantized = list(map(lambda y: bisect.bisect_right(bins, y), x)) - return quantized - - -def _compute_aspect_ratios(dataset): - aspect_ratios = [] - for i in range(len(dataset)): - img_info = dataset.get_img_info(i) - aspect_ratio = float(img_info["height"]) / float(img_info["width"]) - aspect_ratios.append(aspect_ratio) - return aspect_ratios - - -def make_batch_data_sampler( - dataset, sampler, aspect_grouping, images_per_batch, num_iters=None, start_iter=0, 
drop_last=False -): - if aspect_grouping: - if not isinstance(aspect_grouping, (list, tuple)): - aspect_grouping = [aspect_grouping] - aspect_ratios = _compute_aspect_ratios(dataset) - group_ids = _quantize(aspect_ratios, aspect_grouping) - batch_sampler = samplers.GroupedBatchSampler( - sampler, group_ids, images_per_batch, drop_uneven=drop_last - ) - else: - batch_sampler = torch.utils.data.sampler.BatchSampler( - sampler, images_per_batch, drop_last=drop_last - ) - if num_iters is not None: - batch_sampler = samplers.IterationBasedBatchSampler( - batch_sampler, num_iters, start_iter - ) - return batch_sampler - -def make_data_loader(cfg, is_train=True, is_distributed=False, num_replicas=None, rank=None, start_iter=0): - num_gpus = num_replicas or get_world_size() - - if is_train: - images_per_batch = cfg.SOLVER.IMS_PER_BATCH - assert ( - images_per_batch % num_gpus == 0 - ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number " - "of GPUs ({}) used.".format(images_per_batch, num_gpus) - images_per_gpu = images_per_batch // num_gpus - shuffle = True - num_iters = cfg.SOLVER.MAX_ITER - else: - images_per_batch = cfg.TEST.IMS_PER_BATCH - assert ( - images_per_batch % num_gpus == 0 - ), "TEST.IMS_PER_BATCH ({}) must be divisible by the number " - "of GPUs ({}) used.".format(images_per_batch, num_gpus) - images_per_gpu = images_per_batch // num_gpus - shuffle = False if not is_distributed else True - num_iters = None - start_iter = 0 - - if images_per_gpu > 1: - logger = logging.getLogger(__name__) - logger.warning( - "When using more than one image per GPU you may encounter " - "an out-of-memory (OOM) error if your GPU does not have " - "sufficient memory. If this happens, you can reduce " - "SOLVER.IMS_PER_BATCH (for training) or " - "TEST.IMS_PER_BATCH (for inference). For training, you must " - "also adjust the learning rate and schedule length according " - "to the linear scaling rule. See for example: " - "https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14" - ) - - # group images which have similar aspect ratio. 
In this case, we only - # group in two cases: those with width / height > 1, and the other way around, - # but the code supports more general grouping strategy - aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else [] - - paths_catalog = import_file( - "maskrcnn_benchmark.config.paths_catalog", cfg.PATHS_CATALOG, True - ) - - DatasetCatalog = paths_catalog.DatasetCatalog - if len(cfg.DATASETS.REGISTER) > 0: - for new_dataset in cfg.DATASETS.REGISTER: - # img_dir = cfg.DATASETS.REGISTER[new_dataset]["img_dir"] - # if "ann_file" in cfg.DATASETS.REGISTER[new_dataset]: - # ann_file = cfg.DATASETS.REGISTER[new_dataset]["ann_file"] - # else: - # ann_file = None - attrs = dict(cfg.DATASETS.REGISTER[new_dataset]) - if is_train: - new_dataset = new_dataset + cfg.DATASETS.TRAIN_DATASETNAME_SUFFIX - else: - new_dataset = new_dataset + cfg.DATASETS.TEST_DATASETNAME_SUFFIX - DatasetCatalog.set(new_dataset, attrs) - - - dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST - - # Haotian: expand bing dataset - if "bing_caption_train" in dataset_list and len(cfg.DATASETS.BING_INDEX_LIST) > 0: - dataset_list = list(dataset_list) - dataset_list.remove("bing_caption_train") - for bing_index in cfg.DATASETS.BING_INDEX_LIST: - dataset_list.insert(len(dataset_list), "bing_caption_{}_train".format(bing_index)) - dataset_list = tuple(dataset_list) - - if "bing_caption_train_no_coco" in dataset_list and len(cfg.DATASETS.BING_INDEX_LIST) > 0: - dataset_list = list(dataset_list) - dataset_list.remove("bing_caption_train_no_coco") - for bing_index in cfg.DATASETS.BING_INDEX_LIST: - dataset_list.insert(len(dataset_list), "bing_caption_{}_train_no_coco".format(bing_index)) - dataset_list = tuple(dataset_list) - - print("The combined datasets are: {}.".format(dataset_list)) - - transforms = None if not is_train and cfg.TEST.USE_MULTISCALE else build_transforms(cfg, is_train) - - extra_args = {} - if is_train and cfg.DATASETS.USE_CROWD: - extra_args['ignore_crowd'] = False - if is_train and cfg.DATASETS.MAX_BOX > 0: - extra_args['max_box'] = cfg.DATASETS.MAX_BOX - if is_train and cfg.DATASETS.FEW_SHOT>0: - extra_args['few_shot'] = cfg.DATASETS.FEW_SHOT - if is_train and cfg.DATASETS.SHUFFLE_SEED != 0: - extra_args['shuffle_seed'] = cfg.DATASETS.SHUFFLE_SEED - - # od to grounding - if is_train and cfg.DATASETS.RANDOM_SAMPLE_NEG > 0: - extra_args['random_sample_negative'] = cfg.DATASETS.RANDOM_SAMPLE_NEG - if is_train and cfg.DATASETS.ADD_DET_PROMPT: - extra_args["add_detection_prompt"] = True - if is_train and cfg.DATASETS.USE_OD_AUG: - extra_args["use_od_data_aug"] = True - if is_train and cfg.DATASETS.DISABLE_SHUFFLE: - extra_args["disable_shuffle"] = True - if cfg.DATASETS.ONE_HOT: - extra_args["one_hot"] = True - if is_train and len(cfg.DATASETS.PROMPT_VERSION) > 0: - extra_args["prompt_engineer_version"] = cfg.DATASETS.PROMPT_VERSION - if is_train and len(cfg.DATASETS.CONTROL_PROB) == 4: - extra_args["control_probabilities"] = cfg.DATASETS.CONTROL_PROB - if is_train and cfg.DATASETS.DISABLE_CLIP_TO_IMAGE: - extra_args["disable_clip_to_image"] = cfg.DATASETS.DISABLE_CLIP_TO_IMAGE - if is_train and cfg.DATASETS.NO_MINUS_ONE_FOR_ONE_HOT: - extra_args["no_minus_one_for_one_hot"] = cfg.DATASETS.NO_MINUS_ONE_FOR_ONE_HOT - if is_train: - extra_args["separation_tokens"] = cfg.DATASETS.SEPARATION_TOKENS - # caption - if is_train and cfg.DATASETS.CAPTION_MIN_BOX > 0: - extra_args["caption_min_box"] = cfg.DATASETS.CAPTION_MIN_BOX - if is_train and cfg.DATASETS.REPLACE_CLEAN_LABEL: - 
extra_args["replace_clean_label"] = True - if is_train and cfg.DATASETS.FURTHER_SCREEN: - extra_args["further_screen"] = True - if is_train and cfg.DATASETS.CAPTION_CONF > 0.0: - extra_args["caption_conf"] = cfg.DATASETS.CAPTION_CONF - if is_train: - extra_args["caption_nms"] = cfg.DATASETS.CAPTION_NMS - if is_train and cfg.DATASETS.PACK_RANDOM_CAPTION_NUMBER > 0: - extra_args["pack_random_caption_number"] = cfg.DATASETS.PACK_RANDOM_CAPTION_NUMBER - if is_train and cfg.DATASETS.INFERENCE_CAPTION: - extra_args["inference_caption"] = True - if is_train and cfg.DATASETS.SAMPLE_NEGATIVE_FOR_GROUNDING_DATA > 0: - extra_args["sample_negative_for_grounding_data"] = cfg.DATASETS.SAMPLE_NEGATIVE_FOR_GROUNDING_DATA - if is_train and cfg.DATASETS.RANDOM_PACK_PROB > 0: - extra_args["random_pack_prob"] = cfg.DATASETS.RANDOM_PACK_PROB - if is_train and cfg.DATASETS.NO_RANDOM_PACK_PROBABILITY > 0: - extra_args["no_random_pack_probability"] = cfg.DATASETS.NO_RANDOM_PACK_PROBABILITY - if is_train: - extra_args["safeguard_positive_caption"] = cfg.DATASETS.SAFEGUARD_POSITIVE_CAPTION - if is_train: - extra_args["local_debug"] = cfg.DATASETS.LOCAL_DEBUG - if is_train: - extra_args["no_mask_for_od"] = cfg.MODEL.DYHEAD.FUSE_CONFIG.NO_MASK_FOR_OD - if is_train: - extra_args["no_mask_for_gold"] = cfg.MODEL.DYHEAD.FUSE_CONFIG.NO_MASK_FOR_GOLD - if is_train: - extra_args["mlm_obj_for_only_positive"] = cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_OBJ_FOR_ONLY_POSITIVE - if cfg.DATASETS.OVERRIDE_CATEGORY and cfg.DATASETS.USE_OVERRIDE_CATEGORY: - extra_args["override_category"] = cfg.DATASETS.OVERRIDE_CATEGORY - if is_train: - extra_args["caption_format_version"] = cfg.DATASETS.CAPTION_FORMAT_VERSION - if is_train: - extra_args["special_safeguard_for_coco_grounding"] = cfg.DATASETS.SPECIAL_SAFEGUARD_FOR_COCO_GROUNDING - if is_train: - extra_args["diver_box_for_vqa"] = cfg.DATASETS.DIVER_BOX_FOR_VQA - extra_args["caption_prompt"] = cfg.DATASETS.CAPTION_PROMPT - extra_args["use_caption_prompt"] = cfg.DATASETS.USE_CAPTION_PROMPT - - # extra_args['tokenizer'] = AutoTokenizer.from_pretrained(cfg.MODEL.LANGUAGE_BACKBONE.TOKENIZER_TYPE) - if cfg.MODEL.LANGUAGE_BACKBONE.TOKENIZER_TYPE == "clip": - # extra_args['tokenizer'] = build_tokenizer("clip") - from transformers import CLIPTokenizerFast - if cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS: - extra_args["tokenizer"] = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32", from_slow=True, mask_token='ðŁĴij') - else: - extra_args["tokenizer"] = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32", from_slow=True) - else: - extra_args['tokenizer'] = AutoTokenizer.from_pretrained(cfg.MODEL.LANGUAGE_BACKBONE.TOKENIZER_TYPE) - - if isinstance(dataset_list[0], (tuple, list)): - datasets = build_dataset_by_group(dataset_list, transforms, DatasetCatalog, is_train, - class_by_group=cfg.DATASETS.ALTERNATIVE_TRAINING, - class_concat=cfg.DATASETS.CLASS_CONCAT, - extra_args=extra_args) - else: - datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train, - class_concat=cfg.DATASETS.CLASS_CONCAT, - extra_args=extra_args) - - data_loaders = [] - for di, dataset in enumerate(datasets): - if is_train and cfg.SOLVER.MAX_EPOCH > 0: - num_iters = cfg.SOLVER.MAX_EPOCH * len(dataset) // cfg.SOLVER.IMS_PER_BATCH - print("Number of iterations are {}".format(num_iters)) - cfg.defrost() - cfg.SOLVER.MAX_ITER = num_iters - cfg.SOLVER.DATASET_LENGTH = len(dataset) - cfg.freeze() - if is_train and cfg.SOLVER.MULTI_MAX_EPOCH: - num_iters = None - cfg.defrost() - 
cfg.SOLVER.MULTI_MAX_ITER += (cfg.SOLVER.MULTI_MAX_EPOCH[di] * len(dataset) // cfg.SOLVER.IMS_PER_BATCH,) - cfg.freeze() - - if is_train and cfg.DATALOADER.DISTRIBUTE_CHUNK_AMONG_NODE: - from .datasets.custom_distributed_sampler import DistributedSamplerChunkByNode - chunk_or_not = [] - for i in dataset_list: - if "bing_caption" in i: - chunk_or_not.append(True) - else: - chunk_or_not.append(False) - assert(len(chunk_or_not) == len(dataset.datasets)) - ''' - If we are training on 4 nodes, each with 8 GPUs - ''' - num_nodes = int(os.getenv('NODE_COUNT', os.getenv('OMPI_COMM_WORLD_SIZE', 1))) - local_size = cfg.num_gpus//num_nodes - node_rank = int(os.getenv('NODE_RANK', os.getenv('OMPI_COMM_WORLD_RANK', 0))) - local_rank = cfg.local_rank - sampler = DistributedSamplerChunkByNode( - dataset = dataset, - all_datasets = dataset.datasets, # Assumming dataset is a ConcateDataset instance, - chunk_or_not = chunk_or_not, - num_replicas = cfg.num_gpus, # total GPU number, e.g., 32 - rank = dist.get_rank(), # Global Rank, e.g., 0~31 - node_rank = node_rank, # Node Rank, e.g., 0~3 - node_number = num_nodes, # how many node e.g., 4 - process_num_per_node = local_size, # e.g., 8 - rank_within_local_node = local_rank, # e.g., 0~7 - ) - else: - sampler = make_data_sampler(dataset, shuffle, is_distributed, num_replicas=num_replicas, rank=rank, - use_random_seed=cfg.DATALOADER.USE_RANDOM_SEED) - batch_sampler = make_batch_data_sampler( - dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter, drop_last=is_train - ) - collator = BBoxAugCollator() if not is_train and cfg.TEST.USE_MULTISCALE else BatchCollator( - cfg.DATALOADER.SIZE_DIVISIBILITY) - num_workers = cfg.DATALOADER.NUM_WORKERS - data_loader = torch.utils.data.DataLoader( - dataset, - num_workers=num_workers, - batch_sampler=batch_sampler, - collate_fn=collator, - ) - data_loaders.append(data_loader) - if is_train and cfg.SOLVER.MULTI_MAX_EPOCH: - cfg.defrost() - cfg.SOLVER.MULTI_MAX_ITER += ( - cfg.SOLVER.MULTI_MAX_EPOCH[-1] * min([len(dataset) // cfg.SOLVER.IMS_PER_BATCH for dataset in datasets]),) - cfg.freeze() - - if is_train and not cfg.DATASETS.ALTERNATIVE_TRAINING and not cfg.DATASETS.MULTISTAGE_TRAINING: - # during training, a single (possibly concatenated) data_loader is returned - assert len(data_loaders) == 1 - return data_loaders[0] - - return data_loaders diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/solver/__init__.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/solver/__init__.py deleted file mode 100644 index 10f84e12d029a07d5c7d3ac29e18b572a92ef03c..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/solver/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -from .build import build_lr_scheduler, build_optimizer -from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/heiyubili/bingo/src/components/ui/alert-dialog.tsx b/spaces/heiyubili/bingo/src/components/ui/alert-dialog.tsx deleted file mode 100644 index 17fec4d16510328deacc1416569173c97761ef72..0000000000000000000000000000000000000000 --- a/spaces/heiyubili/bingo/src/components/ui/alert-dialog.tsx +++ /dev/null @@ -1,150 +0,0 @@ -'use client' - -import * as React from 'react' -import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog' - -import { cn } from '@/lib/utils' -import { buttonVariants } from '@/components/ui/button' - -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger - -const AlertDialogPortal = ({ - className, - children, - ...props -}: AlertDialogPrimitive.AlertDialogPortalProps) => ( - -
- {children} -
-
-) -AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName - -const AlertDialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName - -const AlertDialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName - -const AlertDialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
-) -AlertDialogHeader.displayName = 'AlertDialogHeader' - -const AlertDialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
-) -AlertDialogFooter.displayName = 'AlertDialogFooter' - -const AlertDialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName - -const AlertDialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogDescription.displayName = - AlertDialogPrimitive.Description.displayName - -const AlertDialogAction = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName - -const AlertDialogCancel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName - -export { - AlertDialog, - AlertDialogTrigger, - AlertDialogContent, - AlertDialogHeader, - AlertDialogFooter, - AlertDialogTitle, - AlertDialogDescription, - AlertDialogAction, - AlertDialogCancel -} diff --git a/spaces/huaiji3y/bingo-Public/src/lib/hooks/use-at-bottom.tsx b/spaces/huaiji3y/bingo-Public/src/lib/hooks/use-at-bottom.tsx deleted file mode 100644 index d37c8cf4162adcb0064e08ecec24eb731416b045..0000000000000000000000000000000000000000 --- a/spaces/huaiji3y/bingo-Public/src/lib/hooks/use-at-bottom.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export function useAtBottom(offset = 0) { - const [isAtBottom, setIsAtBottom] = React.useState(false) - - React.useEffect(() => { - const handleScroll = () => { - setIsAtBottom( - window.innerHeight + window.scrollY >= - document.body.offsetHeight - offset - ) - } - - window.addEventListener('scroll', handleScroll, { passive: true }) - handleScroll() - - return () => { - window.removeEventListener('scroll', handleScroll) - } - }, [offset]) - - return isAtBottom -} diff --git a/spaces/hugginglearners/image-style-transfer/app.py b/spaces/hugginglearners/image-style-transfer/app.py deleted file mode 100644 index c8c2c592e9a067bc2c440015815e2fdd98cc10f3..0000000000000000000000000000000000000000 --- a/spaces/hugginglearners/image-style-transfer/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import math -import numpy as np -import pandas as pd - -import gradio as gr -from huggingface_hub import from_pretrained_fastai -from fastai.vision.all import * -from torchvision.models import vgg19, vgg16 -from utils import * - -pascal_source = '.' 
-EXAMPLES_PATH = Path('./examples') -repo_id = "hugginglearners/fastai-style-transfer" - - -def _inner(feat_net, hooks, x): - feat_net(x) - return hooks.stored - -def _get_layers(arch:str, pretrained=True): - "Get the layers and arch for a VGG Model (16 and 19 are supported only)" - feat_net = vgg19(pretrained=pretrained) if arch.find('9') > 1 else vgg16(pretrained=pretrained) - config = _vgg_config.get(arch) - features = feat_net.features.eval() - for p in features.parameters(): p.requires_grad=False - return feat_net, [features[i] for i in config] - - -_vgg_config = { - 'vgg16' : [1, 11, 18, 25, 20], - 'vgg19' : [1, 6, 11, 20, 29, 22] -} - -feat_net, layers = _get_layers('vgg19', True) -hooks = hook_outputs(layers, detach=False) - -learner = from_pretrained_fastai(repo_id) - -def infer(img): - pred = learner.predict(img) - image = pred[0].numpy() - image = image.transpose((1, 2, 0)) - plt.imshow(image) - return plt.gcf() #pred[0].show() - -# get the inputs -inputs = gr.inputs.Image(shape=(192, 192)) - -# the app outputs two segmented images -output = gr.Plot() -# it's good practice to pass examples, description and a title to guide users -title = 'Style transfer' -description = '' -article = "Author: Nhu Hoang. " -examples = [f'{EXAMPLES_PATH}/{f.name}' for f in EXAMPLES_PATH.iterdir()] - -gr.Interface(infer, inputs, output, examples= examples, allow_flagging='never', cache_examples=False, - title=title, description=description, article=article, live=False).launch(enable_queue=True, debug=False, inbrowser=False) diff --git a/spaces/hugginglearners/rice-image-classification/app.py b/spaces/hugginglearners/rice-image-classification/app.py deleted file mode 100644 index 13bb0c9ebf3b8662655365e201d5525b1984e6f6..0000000000000000000000000000000000000000 --- a/spaces/hugginglearners/rice-image-classification/app.py +++ /dev/null @@ -1,38 +0,0 @@ -import gradio as gr -import torch -from huggingface_hub import from_pretrained_fastai -from pathlib import Path - -examples = ["examples/example_0.png", - "examples/example_1.png", - "examples/example_2.png", - "examples/example_3.png", - "examples/example_4.png"] - -repo_id = "hugginglearners/rice_image_classification" -path = Path("./") - -def get_y(r): - return r["label"] - -def get_x(r): - return path/r["fname"] - -learner = from_pretrained_fastai(repo_id) -labels = learner.dls.vocab - -def inference(image): - label_predict,_,probs = learner.predict(image) - labels_probs = {labels[i]: float(probs[i]) for i, _ in enumerate(labels)} - return labels_probs - -gr.Interface( - fn=inference, - title="Rice image classification", - description = "Predict which type of rice belong to Arborio, Basmati, Ipsala, Jasmine, Karacadag", - inputs="image", - examples=examples, - outputs=gr.outputs.Label(num_top_classes=5, label='Prediction'), - cache_examples=False, - article = "Author: Vu Minh Chien", -).launch(debug=True, enable_queue=True) \ No newline at end of file diff --git a/spaces/hylee/photo2cartoon/p2c/data_process.py b/spaces/hylee/photo2cartoon/p2c/data_process.py deleted file mode 100644 index 6b96bfa3bc535712e063d7ba53d8e892a532b216..0000000000000000000000000000000000000000 --- a/spaces/hylee/photo2cartoon/p2c/data_process.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import cv2 -import numpy as np -from tqdm import tqdm -import argparse - -from utils import Preprocess - - -parser = argparse.ArgumentParser() -parser.add_argument('--data_path', type=str, help='photo folder path') -parser.add_argument('--save_path', type=str, help='save folder path') - 
-args = parser.parse_args() -os.makedirs(args.save_path, exist_ok=True) - -pre = Preprocess() - -for idx, img_name in enumerate(tqdm(os.listdir(args.data_path))): - img = cv2.cvtColor(cv2.imread(os.path.join(args.data_path, img_name)), cv2.COLOR_BGR2RGB) - - # face alignment and segmentation - face_rgba = pre.process(img) - if face_rgba is not None: - # change background to white - face = face_rgba[:,:,:3].copy() - mask = face_rgba[:,:,3].copy()[:,:,np.newaxis]/255. - face_white_bg = (face*mask + (1-mask)*255).astype(np.uint8) - - cv2.imwrite(os.path.join(args.save_path, str(idx).zfill(4)+'.png'), cv2.cvtColor(face_white_bg, cv2.COLOR_RGB2BGR)) diff --git a/spaces/imageomics/dashboard-prototype/Dockerfile b/spaces/imageomics/dashboard-prototype/Dockerfile deleted file mode 100644 index 36e2eb66b4d474348e1c45259f75c1c74c6193fb..0000000000000000000000000000000000000000 --- a/spaces/imageomics/dashboard-prototype/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM ghcr.io/imageomics/dashboard-prototype:latest -COPY run.sh /api/run.sh diff --git a/spaces/imageomics/dev-dashboard/run.sh b/spaces/imageomics/dev-dashboard/run.sh deleted file mode 100644 index 383a306e02443b9bcc62c4ae8ace19522d998c17..0000000000000000000000000000000000000000 --- a/spaces/imageomics/dev-dashboard/run.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -gunicorn -w ${BACKEND_WORKERS:=4} -b :7860 -t 360 dashboard:server diff --git a/spaces/inamXcontru/PoeticTTS/Claudia Lizaldi Revista H Extremo.pdfl !!BETTER!!.md b/spaces/inamXcontru/PoeticTTS/Claudia Lizaldi Revista H Extremo.pdfl !!BETTER!!.md deleted file mode 100644 index fb632f2480dd7a08220ff3d0daf1f7d0cb73fa23..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Claudia Lizaldi Revista H Extremo.pdfl !!BETTER!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

diff --git a/spaces/innat/HybridModel-GradCAM/models/__init__.py b/spaces/innat/HybridModel-GradCAM/models/__init__.py deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/innat/HybridModel-GradCAM/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/innnky/soft-vits-vc/preprocess.py b/spaces/innnky/soft-vits-vc/preprocess.py deleted file mode 100644 index aaedbf076c30114b3ac6c27dfb42fd54ac81a71c..0000000000000000000000000000000000000000 --- a/spaces/innnky/soft-vits-vc/preprocess.py +++ /dev/null @@ -1,25 +0,0 @@ -import argparse -import text -from utils import load_filepaths_and_text - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--out_extension", default="cleaned") - parser.add_argument("--text_index", default=1, type=int) - parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"]) - parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"]) - - args = parser.parse_args() - - - for filelist in args.filelists: - print("START:", filelist) - filepaths_and_text = load_filepaths_and_text(filelist) - for i in range(len(filepaths_and_text)): - original_text = filepaths_and_text[i][args.text_index] - cleaned_text = text._clean_text(original_text, args.text_cleaners) - filepaths_and_text[i][args.text_index] = cleaned_text - - new_filelist = filelist + "." + args.out_extension - with open(new_filelist, "w", encoding="utf-8") as f: - f.writelines(["|".join(x) + "\n" for x in filepaths_and_text]) diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Burp Suite Professional 1.7.37 Free Download UPD.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Burp Suite Professional 1.7.37 Free Download UPD.md deleted file mode 100644 index de588c817cece08262e590b52f563d62aca0d5e2..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Burp Suite Professional 1.7.37 Free Download UPD.md +++ /dev/null @@ -1,9 +0,0 @@ -
\ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACKOBSStudio2202FullInstallerx64.md b/spaces/inplisQlawa/anything-midjourney-v4-1/CRACKOBSStudio2202FullInstallerx64.md deleted file mode 100644 index fd9ff9ea69af428e78b202024528df77ada6d094..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACKOBSStudio2202FullInstallerx64.md +++ /dev/null @@ -1,6 +0,0 @@ -

diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen.md deleted file mode 100644 index 774a1b46396ce079905b338080fc22beb970dfb9..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Color Schemer Studio V2.0 [h33t] [cepuxxx] Serial Key Keygen.md +++ /dev/null @@ -1,80 +0,0 @@ - -

\ No newline at end of file diff --git a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h b/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h deleted file mode 100644 index b2b88e8c46f19b6db0933163e57ccdb51180f517..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h +++ /dev/null @@ -1,35 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once -#include - -namespace groundingdino { - -at::Tensor -ms_deform_attn_cpu_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector -ms_deform_attn_cpu_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - -} // namespace groundingdino diff --git a/spaces/jackyliang42/code-as-policies/README.md b/spaces/jackyliang42/code-as-policies/README.md deleted file mode 100644 index 0b2df1b05ed8c4e6c3a37924b5800d8d670972d0..0000000000000000000000000000000000000000 --- a/spaces/jackyliang42/code-as-policies/README.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Code As Policies -emoji: 📈 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Code as Policies Tabletop Manipulation Interactive Demo - -This demo is from the paper: - -[Code as Policies: Language Model Programs for Embodied Control](https://code-as-policies.github.io/) - -Below is an interactive demo for the simulated tabletop manipulation domain, seen in the paper section IV.D - -## Preparations -1. Obtain an [OpenAI API Key](https://openai.com/blog/openai-api/) - -## Usage -1. Fill in the API Key and how many blocks and bowls to be spawned in the environment. -2. Click Setup/Reset Simulation -3. Based on the new randomly sampled object names, input an instruction and click Run Instruction. If successful, this will render a video and update the simulation environment visualization. - -You can run instructions in sequence and refer back to previous instructions (e.g. do the same with other blocks, move the same block to the other bowl, etc). To reset, click Setup/Reset Env, and this will clear the current instruction history. - -## Supported Instructions -* Spatial reasoning (e.g. to the left of the red block, the closest corner, the farthest bowl, the second block from the right) -* Sequential actions (e.g. put blocks in matching bowls, stack blocks on the bottom right corner) -* Contextual instructions (e.g. do the same with the blue block, undo that) -* Language-based reasoning (e.g. put the forest-colored block on the ocean-colored bowl). 
-* Simple Q&A (e.g. how many blocks are to the left of the blue bowl?) - -## Example Instructions -Note object names may need to be changed depending the sampled object names. -* put the sun-colored block on the bowl closest to it -* stack the blocks on the bottom most bowl -* arrange the blocks as a square in the middle -* move the square 5cm to the right -* how many blocks are to the right of the orange bowl? -* pick up the block closest to the top left corner and place it on the bottom right corner - -## Known Limitations -* In simulation we're using ground truth object poses instead of using vision models. This means that instructions the require knowledge of visual apperances (e.g. darkest bowl, largest object, empty bowls) are not supported. -* Currently, the low-level pick place primitive does not do collision checking, so if there are many objects on the table, placing actions may incur collisions. -* The pick place primitive is also unable to pick up bowls. -* Prompt saturation - if too many instructions (10+) are executed in a row, then the LLM may start to ignore examples in the early parts of the prompt. -* Ambiguous instructions - if a given instruction doesn't lead to the desired actions, try rephrasing it to remove ambiguities (e.g. place the block on the closest bowl -> place the block on its closest bowl) -* Maximum token length - you may hit the maximum token length if running multiple commands in sequence. Please reset the simulation when this happens. diff --git a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.cpp b/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.cpp deleted file mode 100644 index ff4149b8b46b54d2f400ae10e44d19f20503ba1f..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.cpp +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "filtered_lrelu.h" - -//------------------------------------------------------------------------ - -static std::tuple filtered_lrelu( - torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, - int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. 
- TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); - TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); - TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); - TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); - TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); - TORCH_CHECK(fu.numel() > 0, "fu is empty"); - TORCH_CHECK(fd.numel() > 0, "fd is empty"); - TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); - TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); - - // Figure out how much shared memory is available on the device. - int maxSharedBytes = 0; - AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); - int sharedKB = maxSharedBytes >> 10; - - // Populate enough launch parameters to check if a CUDA kernel exists. - filtered_lrelu_kernel_params p; - p.up = up; - p.down = down; - p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. - p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); - filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); - if (!test_spec.exec) - { - // No kernel found - return empty tensors and indicate missing kernel with return code of -1. - return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); - } - - // Input/output element size. - int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; - - // Input sizes. - int64_t xw = (int)x.size(3); - int64_t xh = (int)x.size(2); - int64_t fut_w = (int)fu.size(-1) - 1; - int64_t fut_h = (int)fu.size(0) - 1; - int64_t fdt_w = (int)fd.size(-1) - 1; - int64_t fdt_h = (int)fd.size(0) - 1; - - // Logical size of upsampled buffer. - int64_t cw = xw * up + (px0 + px1) - fut_w; - int64_t ch = xh * up + (py0 + py1) - fut_h; - TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); - TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); - - // Compute output size and allocate. - int64_t yw = (cw - fdt_w + (down - 1)) / down; - int64_t yh = (ch - fdt_h + (down - 1)) / down; - TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); - TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); - torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); - - // Allocate sign tensor. - torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - int64_t sw_active = 0; // Active width of sign tensor. - if (writeSigns) - { - sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. - int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. - int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. 
- TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); - s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - else if (readSigns) - sw_active = s.size(3) << 2; - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); - } - - // Populate rest of CUDA kernel parameters. - p.x = x.data_ptr(); - p.y = y.data_ptr(); - p.b = b.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.fu = fu.data_ptr(); - p.fd = fd.data_ptr(); - p.pad0 = make_int2(px0, py0); - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.flip = (flip_filters) ? 1 : 0; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. - p.sOfs = make_int2(sx, sy); - p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. - - // x, y, b strides are in bytes. - p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); - p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); - p.bStride = sz * b.stride(0); - - // fu, fd strides are in elements. - p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); - p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); - - // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. - bool index64b = false; - if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; - if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; - if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; - if (s.numel() > INT_MAX) index64b = true; - - // Choose CUDA kernel. - filtered_lrelu_kernel_spec spec = { 0 }; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] - { - if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. - { - // Choose kernel based on index type, datatype and sign read/write modes. 
- if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - } - }); - TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = spec.numWarps * 32; - int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; - int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; - int gz = p.yShape.z * p.yShape.w; - - // Repeat multiple horizontal tiles in a CTA? - if (spec.xrep) - { - p.tilesXrep = spec.xrep; - p.tilesXdim = gx; - - gx = (gx + p.tilesXrep - 1) / p.tilesXrep; - std::swap(gx, gy); - } - else - { - p.tilesXrep = 0; - p.tilesXdim = 0; - } - - // Launch filter setup kernel. - AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); - - // Copy kernels to constant memory. - if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - - // Set cache and shared memory configurations for main kernel. - AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); - if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? - AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); - AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); - - // Launch main kernel. - const int maxSubGz = 65535; // CUDA maximum for block z dimension. - for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. - { - p.blockZofs = zofs; - int subGz = std::min(maxSubGz, gz - zofs); - AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); - } - - // Done. - return std::make_tuple(y, so, 0); -} - -//------------------------------------------------------------------------ - -static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); - - // Output signs if we don't have sign input. 
- torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - if (writeSigns) - { - int64_t sw = x.size(3); - sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. - s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); - } - - // Initialize CUDA kernel parameters. - filtered_lrelu_act_kernel_params p; - p.x = x.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. - p.sOfs = make_int2(sx, sy); - - // Choose CUDA kernel. - void* func = 0; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] - { - if (writeSigns) - func = choose_filtered_lrelu_act_kernel(); - else if (readSigns) - func = choose_filtered_lrelu_act_kernel(); - else - func = choose_filtered_lrelu_act_kernel(); - }); - TORCH_CHECK(func, "internal error - CUDA kernel not found"); - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = 128; // 4 warps per block. - - // Logical size of launch = writeSigns ? p.s : p.x - uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; - uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; - uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. - gx = (gx - 1) / bx + 1; - - // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. - const uint32_t gmax = 65535; - gy = std::min(gy, gmax); - gz = std::min(gz, gmax); - - // Launch. - AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); - return so; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. - m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. 
-} - -//------------------------------------------------------------------------ diff --git a/spaces/jatin-tech/SkinZen/Dockerfile b/spaces/jatin-tech/SkinZen/Dockerfile deleted file mode 100644 index 8a4d3bc8fa468aae2aec01c391b2410658c97d6a..0000000000000000000000000000000000000000 --- a/spaces/jatin-tech/SkinZen/Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN apt-get update && apt-get upgrade -y - -RUN apt-get install -y software-properties-common -RUN apt-get install -y build-essential cmake pkg-config \ - && apt-get install -y libx11-dev libatlas-base-dev \ - && apt-get install -y libgtk-3-dev libboost-python-dev - -RUN pip install --no-cache-dir -r /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user - -# Switch to the "user" user -USER user - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -CMD ["python", "main.py"] \ No newline at end of file diff --git a/spaces/jbilcke-hf/VideoQuest/src/app/interface/renderer/full-screen-button.tsx b/spaces/jbilcke-hf/VideoQuest/src/app/interface/renderer/full-screen-button.tsx deleted file mode 100644 index 2daa33beae917eb150252a10d90f11f9ec5be9c3..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/VideoQuest/src/app/interface/renderer/full-screen-button.tsx +++ /dev/null @@ -1,18 +0,0 @@ -import { cn } from "@/lib/utils" -import { FullScreenIcon } from "../../../components/icons/full-screen" - -export function FullScreenButton() { - return ( -
-
- -
-
- ) -} \ No newline at end of file diff --git a/spaces/jeonchangbin49/De-limiter/test_ddp.py b/spaces/jeonchangbin49/De-limiter/test_ddp.py deleted file mode 100644 index 9e4db2c6799879bfba93e8f62c61d3150f6b8498..0000000000000000000000000000000000000000 --- a/spaces/jeonchangbin49/De-limiter/test_ddp.py +++ /dev/null @@ -1,245 +0,0 @@ -# To be honest... this is not ddp. -import os -import json -import argparse -import glob - -import torch -import tqdm -import musdb -import librosa -import soundfile as sf -import pyloudnorm as pyln -from dotmap import DotMap - -from models import load_model_with_args -from separate_func import ( - conv_tasnet_separate, -) -from utils import str2bool, db2linear - - -tqdm.monitor_interval = 0 - - -def separate_track_with_model( - args, model, device, track_audio, track_name, meter, augmented_gain -): - with torch.no_grad(): - if ( - args.model_loss_params.architecture == "conv_tasnet_mask_on_output" - or args.model_loss_params.architecture == "conv_tasnet" - ): - estimates = conv_tasnet_separate( - args, - model, - device, - track_audio, - track_name, - meter=meter, - augmented_gain=augmented_gain, - ) - - return estimates - - -def main(): - parser = argparse.ArgumentParser(description="model test.py") - - parser.add_argument("--target", type=str, default="all") - parser.add_argument("--data_root", type=str, default="/path/to/musdb_XL") - parser.add_argument( - "--use_musdb", - type=str2bool, - default=True, - help="Use musdb test data or just want to inference other samples?", - ) - parser.add_argument("--exp_name", type=str, default="delimit_6_s') - parser.add_argument("--manual_output_name", type=str, default=None) - parser.add_argument( - "--output_directory", type=str, default="/path/to/results" - ) - parser.add_argument("--use_gpu", type=str2bool, default=True) - parser.add_arugment("--save_name_as_target", type=str2bool, default=True) - parser.add_argument( - "--loudnorm_input_lufs", - type=float, - default=None, - help="If you want to use loudnorm, input target lufs", - ) - parser.add_argument( - "--use_singletrackset", - type=str2bool, - default=False, - help="Use SingleTrackSet for X-UMX", - ) - parser.add_argument( - "--best_model", - type=str2bool, - default=True, - help="Use best model or lastly saved model", - ) - parser.add_argument( - "--save_output_loudnorm", - type=float, - default=None, - help="Save loudness normalized outputs or not. If you want to save, input target loudness", - ) - parser.add_argument( - "--save_mixed_output", - type=float, - default=None, - help="Save original+delimited-estimation mixed output with a ratio of default 0.5 (orginal) and 1 - 0.5 (estimation)", - ) - parser.add_argument( - "--save_16k_mono", - type=str2bool, - default=False, - help="Save 16k mono wav files for FAD evaluation.", - ) - parser.add_argument( - "--save_histogram", - type=str2bool, - default=False, - help="Save histogram of the output. 
Only valid when the task is 'delimit'", - ) - - args, _ = parser.parse_known_args() - - args.output_dir = f"{args.output_directory}/checkpoint/{args.exp_name}" - with open(f"{args.output_dir}/{args.target}.json", "r") as f: - args_dict = json.load(f) - args_dict = DotMap(args_dict) - - for key, value in args_dict["args"].items(): - if key in list(vars(args).keys()): - pass - else: - setattr(args, key, value) - - args.test_output_dir = f"{args.output_directory}/test/{args.exp_name}" - - if args.manual_output_name != None: - args.test_output_dir = f"{args.output_directory}/test/{args.manual_output_name}" - os.makedirs(args.test_output_dir, exist_ok=True) - - device = torch.device( - "cuda" if torch.cuda.is_available() and args.use_gpu else "cpu" - ) - - ###################### Define Models ###################### - our_model = load_model_with_args(args) - our_model = our_model.to(device) - print(our_model) - pytorch_total_params = sum( - p.numel() for p in our_model.parameters() if p.requires_grad - ) - print("Total number of parameters", pytorch_total_params) - # Future work => Torchinfo would be better for this purpose. - - if args.best_model: - target_model_path = f"{args.output_dir}/{args.target}.pth" - checkpoint = torch.load(target_model_path, map_location=device) - our_model.load_state_dict(checkpoint) - else: # when using lastly saved model - target_model_path = f"{args.output_dir}/{args.target}.chkpnt" - checkpoint = torch.load(target_model_path, map_location=device) - our_model.load_state_dict(checkpoint["state_dict"]) - - our_model.eval() - - meter = pyln.Meter(44100) - - if args.use_musdb: - test_tracks = musdb.DB(root=args.data_root, subsets="test", is_wav=True) - - for track in tqdm.tqdm(test_tracks): - track_name = track.name - track_audio = track.audio - - orig_audio = track_audio.copy() - - augmented_gain = None - print("Now De-limiting : ", track_name) - - if args.loudnorm_input_lufs: # If you want to use loud-normalized input - track_lufs = meter.integrated_loudness(track_audio) - augmented_gain = args.loudnorm_input_lufs - track_lufs - track_audio = track_audio * db2linear(augmented_gain, eps=0.0) - - track_audio = ( - torch.as_tensor(track_audio.T, dtype=torch.float32) - .unsqueeze(0) - .to(device) - ) - - estimates = separate_track_with_model( - args, our_model, device, track_audio, track_name, meter, augmented_gain - ) - - if args.save_mixed_output: - orig_audio = orig_audio.T - track_lufs = meter.integrated_loudness(orig_audio.T) - augmented_gain = args.save_output_loudnorm - track_lufs - orig_audio = orig_audio * db2linear(augmented_gain, eps=0.0) - - mixed_output = orig_audio * args.save_mixed_output + estimates * ( - 1 - args.save_mixed_output - ) - - sf.write( - f"{args.test_output_dir}/{track_name}/{str(args.save_mixed_output)}_mixed.wav", - mixed_output.T, - args.data_params.sample_rate, - ) - else: - test_tracks = glob.glob(f"{args.data_root}/*.wav") + glob.glob( - f"{args.data_root}/*.mp3" - ) - - for track in tqdm.tqdm(test_tracks): - track_name = os.path.basename(track).replace(".wav", "").replace(".mp3", "") - track_audio, sr = librosa.load( - track, sr=None, mono=False - ) # sr should be 44100 - - orig_audio = track_audio.copy() - - if sr != 44100: - raise ValueError("Sample rate should be 44100") - augmented_gain = None - print("Now De-limiting : ", track_name) - - if args.loudnorm_input_lufs: # If you want to use loud-normalized input - track_lufs = meter.integrated_loudness(track_audio.T) - augmented_gain = args.loudnorm_input_lufs - track_lufs - 
track_audio = track_audio * db2linear(augmented_gain, eps=0.0) - - track_audio = ( - torch.as_tensor(track_audio, dtype=torch.float32) - .unsqueeze(0) - .to(device) - ) - - estimates = separate_track_with_model( - args, our_model, device, track_audio, track_name, meter, augmented_gain - ) - - if args.save_mixed_output: - track_lufs = meter.integrated_loudness(orig_audio.T) - augmented_gain = args.save_output_loudnorm - track_lufs - orig_audio = orig_audio * db2linear(augmented_gain, eps=0.0) - - mixed_output = orig_audio * args.save_mixed_output + estimates * ( - 1 - args.save_mixed_output - ) - - sf.write( - f"{args.test_output_dir}/{track_name}/{track_name}_mixed.wav", - mixed_output.T, - args.data_params.sample_rate, - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py b/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py deleted file mode 100644 index 45bb3c8cfd36d8f668e6fde756b17587eab72082..0000000000000000000000000000000000000000 --- a/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- -# File : test_sync_batchnorm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. - -import unittest - -import torch -import torch.nn as nn -from torch.autograd import Variable - -from sync_batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, DataParallelWithCallback -from sync_batchnorm.unittest import TorchTestCase - - -def handy_var(a, unbias=True): - n = a.size(0) - asum = a.sum(dim=0) - as_sum = (a ** 2).sum(dim=0) # a square sum - sumvar = as_sum - asum * asum / n - if unbias: - return sumvar / (n - 1) - else: - return sumvar / n - - -def _find_bn(module): - for m in module.modules(): - if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)): - return m - - -class SyncTestCase(TorchTestCase): - def _syncParameters(self, bn1, bn2): - bn1.reset_parameters() - bn2.reset_parameters() - if bn1.affine and bn2.affine: - bn2.weight.data.copy_(bn1.weight.data) - bn2.bias.data.copy_(bn1.bias.data) - - def _checkBatchNormResult(self, bn1, bn2, input, is_train, cuda=False): - """Check the forward and backward for the customized batch normalization.""" - bn1.train(mode=is_train) - bn2.train(mode=is_train) - - if cuda: - input = input.cuda() - - self._syncParameters(_find_bn(bn1), _find_bn(bn2)) - - input1 = Variable(input, requires_grad=True) - output1 = bn1(input1) - output1.sum().backward() - input2 = Variable(input, requires_grad=True) - output2 = bn2(input2) - output2.sum().backward() - - self.assertTensorClose(input1.data, input2.data) - self.assertTensorClose(output1.data, output2.data) - self.assertTensorClose(input1.grad, input2.grad) - self.assertTensorClose(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean) - self.assertTensorClose(_find_bn(bn1).running_var, _find_bn(bn2).running_var) - - def testSyncBatchNormNormalTrain(self): - bn = nn.BatchNorm1d(10) - sync_bn = SynchronizedBatchNorm1d(10) - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True) - - def testSyncBatchNormNormalEval(self): - bn = nn.BatchNorm1d(10) - sync_bn = SynchronizedBatchNorm1d(10) - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False) - - def testSyncBatchNormSyncTrain(self): - bn 
= nn.BatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - - bn.cuda() - sync_bn.cuda() - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True, cuda=True) - - def testSyncBatchNormSyncEval(self): - bn = nn.BatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - - bn.cuda() - sync_bn.cuda() - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False, cuda=True) - - def testSyncBatchNorm2DSyncTrain(self): - bn = nn.BatchNorm2d(10) - sync_bn = SynchronizedBatchNorm2d(10) - sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - - bn.cuda() - sync_bn.cuda() - - self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/pagerange.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/pagerange.py deleted file mode 100644 index f009adc195a860336c783c1a58a184b10c48fa6b..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/pagerange.py +++ /dev/null @@ -1,173 +0,0 @@ -""" -Representation and utils for ranges of PDF file pages. - -Copyright (c) 2014, Steve Witham . -All rights reserved. This software is available under a BSD license; -see https://github.com/py-pdf/PyPDF2/blob/main/LICENSE -""" - -import re -from typing import Any, List, Tuple, Union - -from .errors import ParseError - -_INT_RE = r"(0|-?[1-9]\d*)" # A decimal int, don't allow "-0". -PAGE_RANGE_RE = "^({int}|({int}?(:{int}?(:{int}?)?)))$".format(int=_INT_RE) -# groups: 12 34 5 6 7 8 - - -class PageRange: - """ - A slice-like representation of a range of page indices. - - For example, page numbers, only starting at zero. - - The syntax is like what you would put between brackets [ ]. - The slice is one of the few Python types that can't be subclassed, - but this class converts to and from slices, and allows similar use. - - - PageRange(str) parses a string representing a page range. - - PageRange(slice) directly "imports" a slice. - - to_slice() gives the equivalent slice. - - str() and repr() allow printing. - - indices(n) is like slice.indices(n). - - """ - - def __init__(self, arg: Union[slice, "PageRange", str]) -> None: - """ - Initialize with either a slice -- giving the equivalent page range, - or a PageRange object -- making a copy, - or a string like - "int", "[int]:[int]" or "[int]:[int]:[int]", - where the brackets indicate optional ints. - Remember, page indices start with zero. - Page range expression examples: - : all pages. -1 last page. - 22 just the 23rd page. :-1 all but the last page. - 0:3 the first three pages. -2 second-to-last page. - :3 the first three pages. -2: last two pages. - 5: from the sixth page onward. -3:-1 third & second to last. - The third, "stride" or "step" number is also recognized. - ::2 0 2 4 ... to the end. 3:0:-1 3 2 1 but not 0. - 1:10:2 1 3 5 7 9 2::-1 2 1 0. - ::-1 all pages in reverse order. - Note the difference between this notation and arguments to slice(): - slice(3) means the first three pages; - PageRange("3") means the range of only the fourth page. - However PageRange(slice(3)) means the first three pages. 
- """ - if isinstance(arg, slice): - self._slice = arg - return - - if isinstance(arg, PageRange): - self._slice = arg.to_slice() - return - - m = isinstance(arg, str) and re.match(PAGE_RANGE_RE, arg) - if not m: - raise ParseError(arg) - elif m.group(2): - # Special case: just an int means a range of one page. - start = int(m.group(2)) - stop = start + 1 if start != -1 else None - self._slice = slice(start, stop) - else: - self._slice = slice(*[int(g) if g else None for g in m.group(4, 6, 8)]) - - @staticmethod - def valid(input: Any) -> bool: - """True if input is a valid initializer for a PageRange.""" - return isinstance(input, (slice, PageRange)) or ( - isinstance(input, str) and bool(re.match(PAGE_RANGE_RE, input)) - ) - - def to_slice(self) -> slice: - """Return the slice equivalent of this page range.""" - return self._slice - - def __str__(self) -> str: - """A string like "1:2:3".""" - s = self._slice - indices: Union[Tuple[int, int], Tuple[int, int, int]] - if s.step is None: - if s.start is not None and s.stop == s.start + 1: - return str(s.start) - - indices = s.start, s.stop - else: - indices = s.start, s.stop, s.step - return ":".join("" if i is None else str(i) for i in indices) - - def __repr__(self) -> str: - """A string like "PageRange('1:2:3')".""" - return "PageRange(" + repr(str(self)) + ")" - - def indices(self, n: int) -> Tuple[int, int, int]: - """ - n is the length of the list of pages to choose from. - - Returns arguments for range(). See help(slice.indices). - """ - return self._slice.indices(n) - - def __eq__(self, other: Any) -> bool: - if not isinstance(other, PageRange): - return False - return self._slice == other._slice - - def __add__(self, other: "PageRange") -> "PageRange": - if not isinstance(other, PageRange): - raise TypeError(f"Can't add PageRange and {type(other)}") - if self._slice.step is not None or other._slice.step is not None: - raise ValueError("Can't add PageRange with stride") - a = self._slice.start, self._slice.stop - b = other._slice.start, other._slice.stop - - if a[0] > b[0]: - a, b = b, a - - # Now a[0] is the smallest - if b[0] > a[1]: - # There is a gap between a and b. - raise ValueError("Can't add PageRanges with gap") - return PageRange(slice(a[0], max(a[1], b[1]))) - - -PAGE_RANGE_ALL = PageRange(":") # The range of all pages. - - -def parse_filename_page_ranges( - args: List[Union[str, PageRange, None]] -) -> List[Tuple[str, PageRange]]: - """ - Given a list of filenames and page ranges, return a list of (filename, page_range) pairs. - - First arg must be a filename; other ags are filenames, page-range - expressions, slice objects, or PageRange objects. - A filename not followed by a page range indicates all pages of the file. - """ - pairs: List[Tuple[str, PageRange]] = [] - pdf_filename = None - did_page_range = False - for arg in args + [None]: - if PageRange.valid(arg): - if not pdf_filename: - raise ValueError( - "The first argument must be a filename, not a page range." - ) - - pairs.append((pdf_filename, PageRange(arg))) - did_page_range = True - else: - # New filename or end of list--do all of the previous file? 
- if pdf_filename and not did_page_range: - pairs.append((pdf_filename, PAGE_RANGE_ALL)) - - pdf_filename = arg - did_page_range = False - return pairs - - -PageRangeSpec = Union[str, PageRange, Tuple[int, int], Tuple[int, int, int], List[int]] diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/data_structs/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/data_structs/__init__.py deleted file mode 100644 index 32181901655fb562616784f8dca83a482b9e76ac..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/data_structs/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Init file.""" - -from gpt_index.data_structs.data_structs import ( - IndexDict, - IndexGraph, - IndexList, - KeywordTable, - Node, -) -from gpt_index.data_structs.table import StructDatapoint - -__all__ = [ - "Node", - "IndexGraph", - "KeywordTable", - "IndexList", - "IndexDict", - "StructDatapoint", -] diff --git a/spaces/jone/Music_Source_Separation/bytesep/models/subband_tools/__init__.py b/spaces/jone/Music_Source_Separation/bytesep/models/subband_tools/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/jonjhiggins/MiDaS/app.py b/spaces/jonjhiggins/MiDaS/app.py deleted file mode 100644 index 489938862fb3180daf9c4fe59ef129a8e45a14b0..0000000000000000000000000000000000000000 --- a/spaces/jonjhiggins/MiDaS/app.py +++ /dev/null @@ -1,63 +0,0 @@ -import cv2 -import torch -import gradio as gr -import numpy as np -from PIL import Image - -torch.hub.download_url_to_file('https://images.unsplash.com/photo-1437622368342-7a3d73a34c8f', 'turtle.jpg') -torch.hub.download_url_to_file('https://images.unsplash.com/photo-1519066629447-267fffa62d4b', 'lions.jpg') - -midas = torch.hub.load("intel-isl/MiDaS", "MiDaS") - -use_large_model = True - -if use_large_model: - midas = torch.hub.load("intel-isl/MiDaS", "MiDaS") -else: - midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small") - -device = "cpu" -midas.to(device) - -midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") - -if use_large_model: - transform = midas_transforms.default_transform -else: - transform = midas_transforms.small_transform - - -def depth(img): - cv_image = np.array(img) - img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB) - - input_batch = transform(img).to(device) - with torch.no_grad(): - prediction = midas(input_batch) - - prediction = torch.nn.functional.interpolate( - prediction.unsqueeze(1), - size=img.shape[:2], - mode="bicubic", - align_corners=False, - ).squeeze() - - output = prediction.cpu().numpy() - formatted = (output * 255 / np.max(output)).astype('uint8') - img = Image.fromarray(formatted) - return img - - -inputs = gr.inputs.Image(type='pil', label="Original Image") -outputs = gr.outputs.Image(type="pil",label="Output Image") - -title = "MiDaS" -description = "Gradio demo for MiDaS v2.1 which takes in a single image for computing relative depth. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." -article = "

Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer | Github Repo

" - -examples = [ - ["turtle.jpg"], - ["lions.jpg"] -] - -gr.Interface(depth, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch() \ No newline at end of file diff --git a/spaces/jordonpeter01/ai-comic-factory/src/app/engine/render.ts b/spaces/jordonpeter01/ai-comic-factory/src/app/engine/render.ts deleted file mode 100644 index 9510e789979f0e45cc28b98bee18aa73c812e7f5..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/app/engine/render.ts +++ /dev/null @@ -1,294 +0,0 @@ -"use server" - -import Replicate, { Prediction } from "replicate" - -import { RenderRequest, RenderedScene, RenderingEngine } from "@/types" -import { generateSeed } from "@/lib/generateSeed" -import { sleep } from "@/lib/sleep" - -const renderingEngine = `${process.env.RENDERING_ENGINE || ""}` as RenderingEngine - -const replicateToken = `${process.env.REPLICATE_API_TOKEN || ""}` -const replicateModel = `${process.env.REPLICATE_API_MODEL || ""}` -const replicateModelVersion = `${process.env.REPLICATE_API_MODEL_VERSION || ""}` - -// note: there is no / at the end in the variable -// so we have to add it ourselves if needed -const apiUrl = process.env.VIDEOCHAIN_API_URL - -export async function newRender({ - prompt, - // negativePrompt, - width, - height -}: { - prompt: string - // negativePrompt: string[] - width: number - height: number -}) { - // console.log(`newRender(${prompt})`) - if (!prompt) { - console.error(`cannot call the rendering API without a prompt, aborting..`) - throw new Error(`cannot call the rendering API without a prompt, aborting..`) - } - - let defaulResult: RenderedScene = { - renderId: "", - status: "error", - assetUrl: "", - alt: prompt || "", - maskUrl: "", - error: "failed to fetch the data", - segments: [] - } - - - try { - if (renderingEngine === "REPLICATE") { - if (!replicateToken) { - throw new Error(`you need to configure your REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`) - } - if (!replicateModel) { - throw new Error(`you need to configure your REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`) - } - if (!replicateModelVersion) { - throw new Error(`you need to configure your REPLICATE_API_MODEL_VERSION in order to use the REPLICATE rendering engine`) - } - const replicate = new Replicate({ auth: replicateToken }) - - // console.log("Calling replicate..") - const seed = generateSeed() - const prediction = await replicate.predictions.create({ - version: replicateModelVersion, - input: { prompt, seed } - }) - - // console.log("prediction:", prediction) - - // no need to reply straight away: good things take time - // also our friends at Replicate won't like it if we spam them with requests - await sleep(4000) - - return { - renderId: prediction.id, - status: "pending", - assetUrl: "", - alt: prompt, - error: prediction.error, - maskUrl: "", - segments: [] - } as RenderedScene - } else { - // console.log(`calling POST ${apiUrl}/render with prompt: ${prompt}`) - const res = await fetch(`${apiUrl}/render`, { - method: "POST", - headers: { - Accept: "application/json", - "Content-Type": "application/json", - Authorization: `Bearer ${process.env.VIDEOCHAIN_API_TOKEN}`, - }, - body: JSON.stringify({ - prompt, - // negativePrompt, unused for now - nbFrames: 1, - nbSteps: 25, // 20 = fast, 30 = better, 50 = best - actionnables: [], // ["text block"], - segmentation: "disabled", // "firstframe", // one day we will remove this param, to make it 
automatic - width, - height, - - // no need to upscale right now as we generate tiny panels - // maybe later we can provide an "export" button to PDF - // unfortunately there are too many requests for upscaling, - // the server is always down - upscalingFactor: 1, // 2, - - // analyzing doesn't work yet, it seems.. - analyze: false, // analyze: true, - - cache: "ignore" - } as Partial), - cache: 'no-store', - // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache) - // next: { revalidate: 1 } - }) - - - // console.log("res:", res) - // The return value is *not* serialized - // You can return Date, Map, Set, etc. - - // Recommendation: handle errors - if (res.status !== 200) { - // This will activate the closest `error.js` Error Boundary - throw new Error('Failed to fetch data') - } - - const response = (await res.json()) as RenderedScene - return response - } - } catch (err) { - console.error(err) - return defaulResult - } -} - -export async function getRender(renderId: string) { - if (!renderId) { - console.error(`cannot call the rendering API without a renderId, aborting..`) - throw new Error(`cannot call the rendering API without a renderId, aborting..`) - } - - let defaulResult: RenderedScene = { - renderId: "", - status: "pending", - assetUrl: "", - alt: "", - maskUrl: "", - error: "failed to fetch the data", - segments: [] - } - - try { - if (renderingEngine === "REPLICATE") { - if (!replicateToken) { - throw new Error(`you need to configure your REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`) - } - if (!replicateModel) { - throw new Error(`you need to configure your REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`) - } - - // const replicate = new Replicate({ auth: replicateToken }) - - // console.log("Calling replicate..") - // const prediction = await replicate.predictions.get(renderId) - // console.log("Prediction:", prediction) - - // console.log(`calling GET https://api.replicate.com/v1/predictions/${renderId}`) - const res = await fetch(`https://api.replicate.com/v1/predictions/${renderId}`, { - method: "GET", - headers: { - // Accept: "application/json", - // "Content-Type": "application/json", - Authorization: `Token ${replicateToken}`, - }, - cache: 'no-store', - // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache) - // next: { revalidate: 1 } - }) - - // console.log("res:", res) - // The return value is *not* serialized - // You can return Date, Map, Set, etc. - - // Recommendation: handle errors - if (res.status !== 200) { - // This will activate the closest `error.js` Error Boundary - throw new Error('Failed to fetch data') - } - - const response = (await res.json()) as any - // console.log("response:", response) - - return { - renderId, - status: response?.error ? "error" : response?.status === "succeeded" ? 
"completed" : "pending", - assetUrl: `${response?.output || ""}`, - alt: `${response?.input?.prompt || ""}`, - error: `${response?.error || ""}`, - maskUrl: "", - segments: [] - } as RenderedScene - } else { - // console.log(`calling GET ${apiUrl}/render with renderId: ${renderId}`) - const res = await fetch(`${apiUrl}/render/${renderId}`, { - method: "GET", - headers: { - Accept: "application/json", - "Content-Type": "application/json", - Authorization: `Bearer ${process.env.VIDEOCHAIN_API_TOKEN}`, - }, - cache: 'no-store', - // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache) - // next: { revalidate: 1 } - }) - - // console.log("res:", res) - // The return value is *not* serialized - // You can return Date, Map, Set, etc. - - // Recommendation: handle errors - if (res.status !== 200) { - // This will activate the closest `error.js` Error Boundary - throw new Error('Failed to fetch data') - } - - const response = (await res.json()) as RenderedScene - // console.log("response:", response) - return response - } - } catch (err) { - console.error(err) - defaulResult.status = "error" - defaulResult.error = `${err}` - // Gorgon.clear(cacheKey) - return defaulResult - } - - // }, cacheDurationInSec * 1000) -} - -export async function upscaleImage(image: string): Promise<{ - assetUrl: string - error: string -}> { - if (!image) { - console.error(`cannot call the rendering API without an image, aborting..`) - throw new Error(`cannot call the rendering API without an image, aborting..`) - } - - let defaulResult = { - assetUrl: "", - error: "failed to fetch the data", - } - - try { - // console.log(`calling GET ${apiUrl}/render with renderId: ${renderId}`) - const res = await fetch(`${apiUrl}/upscale`, { - method: "POST", - headers: { - Accept: "application/json", - "Content-Type": "application/json", - Authorization: `Bearer ${process.env.VIDEOCHAIN_API_TOKEN}`, - }, - cache: 'no-store', - body: JSON.stringify({ image, factor: 3 }) - // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache) - // next: { revalidate: 1 } - }) - - // console.log("res:", res) - // The return value is *not* serialized - // You can return Date, Map, Set, etc. 
- - // Recommendation: handle errors - if (res.status !== 200) { - // This will activate the closest `error.js` Error Boundary - throw new Error('Failed to fetch data') - } - - const response = (await res.json()) as { - assetUrl: string - error: string - } - // console.log("response:", response) - return response - } catch (err) { - console.error(err) - // Gorgon.clear(cacheKey) - return defaulResult - } - - // }, cacheDurationInSec * 1000) -} diff --git a/spaces/justest/gpt4free/g4f/Provider/Providers/Theb.py b/spaces/justest/gpt4free/g4f/Provider/Providers/Theb.py deleted file mode 100644 index aa43ebc55d74ffaa722fe008424fce97c622a323..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/Provider/Providers/Theb.py +++ /dev/null @@ -1,28 +0,0 @@ -import os -import json -import time -import subprocess - -from ...typing import sha256, Dict, get_type_hints - -url = 'https://theb.ai' -model = ['gpt-3.5-turbo'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - - path = os.path.dirname(os.path.realpath(__file__)) - config = json.dumps({ - 'messages': messages, - 'model': model}, separators=(',', ':')) - - cmd = ['python3', f'{path}/helpers/theb.py', config] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - for line in iter(p.stdout.readline, b''): - yield line.decode('utf-8') - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/kazuk/youtube-whisper-06/app.py b/spaces/kazuk/youtube-whisper-06/app.py deleted file mode 100644 index 4a61dc561a016c53ad93a3c556b0ef7bafa964eb..0000000000000000000000000000000000000000 --- a/spaces/kazuk/youtube-whisper-06/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import gradio as gr -import whisper -from pytube import YouTube - -def get_audio(url): - yt = YouTube(url) - return yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4") - -def get_transcript(url, model_size, lang, format): - - model = whisper.load_model(model_size) - - if lang == "None": - lang = None - - result = model.transcribe(get_audio(url), fp16=False, language=lang) - - if format == "None": - return result["text"] - elif format == ".srt": - return format_to_srt(result["segments"]) - -def format_to_srt(segments): - output = "" - for i, segment in enumerate(segments): - output += f"{i + 1}\n" - output += f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n" - output += f"{segment['text']}\n\n" - return output - -def format_timestamp(t): - hh = t//3600 - mm = (t - hh*3600)//60 - ss = t - hh*3600 - mm*60 - mi = (t - int(t))*1000 - return f"{int(hh):02d}:{int(mm):02d}:{int(ss):02d},{int(mi):03d}" - - -langs = ["None"] + sorted(list(whisper.tokenizer.LANGUAGES.values())) -model_size = list(whisper._MODELS.keys()) - -with gr.Blocks() as demo: - - with gr.Row(): - - with gr.Column(): - - with gr.Row(): - url = gr.Textbox(placeholder='Youtube video URL', label='URL') - - with gr.Row(): - - model_size = gr.Dropdown(choices=model_size, value='tiny', label="Model") - lang = gr.Dropdown(choices=langs, value="None", label="Language (Optional)") - format = gr.Dropdown(choices=["None", ".srt"], value="None", label="Timestamps? 
(Optional)") - - with gr.Row(): - gr.Markdown("Larger models are more accurate, but slower. For 1min video, it'll take ~30s (tiny), ~1min (base), ~3min (small), ~5min (medium), etc.") - transcribe_btn = gr.Button('Transcribe') - - with gr.Column(): - outputs = gr.Textbox(placeholder='Transcription of the video', label='Transcription') - - transcribe_btn.click(get_transcript, inputs=[url, model_size, lang, format], outputs=outputs) - -demo.launch(debug=True) diff --git a/spaces/kbora/minerva-generate-docker/blocks/utils/__init__.py b/spaces/kbora/minerva-generate-docker/blocks/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/config.py b/spaces/keithhon/Real-Time-Voice-Cloning/encoder/config.py deleted file mode 100644 index 1c21312f3de971bfa008254c6035cebc09f05e4c..0000000000000000000000000000000000000000 --- a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/config.py +++ /dev/null @@ -1,45 +0,0 @@ -librispeech_datasets = { - "train": { - "clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"], - "other": ["LibriSpeech/train-other-500"] - }, - "test": { - "clean": ["LibriSpeech/test-clean"], - "other": ["LibriSpeech/test-other"] - }, - "dev": { - "clean": ["LibriSpeech/dev-clean"], - "other": ["LibriSpeech/dev-other"] - }, -} -libritts_datasets = { - "train": { - "clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"], - "other": ["LibriTTS/train-other-500"] - }, - "test": { - "clean": ["LibriTTS/test-clean"], - "other": ["LibriTTS/test-other"] - }, - "dev": { - "clean": ["LibriTTS/dev-clean"], - "other": ["LibriTTS/dev-other"] - }, -} -voxceleb_datasets = { - "voxceleb1" : { - "train": ["VoxCeleb1/wav"], - "test": ["VoxCeleb1/test_wav"] - }, - "voxceleb2" : { - "train": ["VoxCeleb2/dev/aac"], - "test": ["VoxCeleb2/test_wav"] - } -} - -other_datasets = [ - "LJSpeech-1.1", - "VCTK-Corpus/wav48", -] - -anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"] diff --git a/spaces/keras-dreambooth/piranesi-monument-art/app.py b/spaces/keras-dreambooth/piranesi-monument-art/app.py deleted file mode 100644 index 2a8a0d0e9581aec7f3c7f0dc80c81a279f1a54b7..0000000000000000000000000000000000000000 --- a/spaces/keras-dreambooth/piranesi-monument-art/app.py +++ /dev/null @@ -1,52 +0,0 @@ -from huggingface_hub import from_pretrained_keras -import keras_cv -import gradio as gr -from tensorflow import keras - -keras.mixed_precision.set_global_policy("mixed_float16") -# load keras model -resolution = 512 -dreambooth_model = keras_cv.models.StableDiffusion( - img_width=resolution, img_height=resolution, jit_compile=True, - ) -loaded_diffusion_model = from_pretrained_keras("keras-dreambooth/dreambooth-piranesi") -dreambooth_model._diffusion_model = loaded_diffusion_model - - -def generate_images(prompt: str, negative_prompt:str, num_imgs_to_gen: int, num_steps: int, ugs: int): - generated_img = dreambooth_model.text_to_image( - prompt, - negative_prompt=negative_prompt, - batch_size=num_imgs_to_gen, - num_steps=num_steps, - unconditional_guidance_scale=ugs, - ) - - return generated_img - -with gr.Blocks() as demo: - gr.HTML("

Dreambooth Piranesi Art

") - with gr.Row(): - with gr.Column(): - prompt = gr.Textbox(lines=1, value="image of monument in sks style", label="Base Prompt") - negative_prompt = gr.Textbox(lines=1, value="deformed", label="Negative Prompt") - samples = gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of Image") - num_steps = gr.Slider(label="Inference Steps",value=40) - ugs = gr.Slider(value=15, minimum=5, maximum=25, step=1, label="Unconditional Guidance Scale") - run = gr.Button(value="Run") - with gr.Column(): - gallery = gr.Gallery(label="Outputs").style(grid=(1,2)) - - run.click(generate_images, inputs=[prompt,negative_prompt, samples, num_steps, ugs], outputs=gallery) - - gr.Examples([["image of monument in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 30, 18], - ["image of menhir in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 20], - ["image of church in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 20], - ["image of ancient ruins in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 50, 20], - ["image of castle on hilltop in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 50, 10], - ["image of amphiteater in sks style, 8k, high quality, old paper","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 9], - ["image of church in lake in sks style, 8k, high quality, old paper, black and white","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 18], - ["image of village on hilltop with citadel in sks style, 8k, high quality, old paper, black and white","colored, deformed, blurry, grain, artifacts, low quality", 1, 40, 18]], - [prompt,negative_prompt, samples,num_steps, ugs], gallery, generate_images) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/kevinwang676/Bert-VITS2/start.bat b/spaces/kevinwang676/Bert-VITS2/start.bat deleted file mode 100644 index 418d21233dbf720b0dd09821904d9d6a31b123a2..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bert-VITS2/start.bat +++ /dev/null @@ -1,2 +0,0 @@ -set PYTHON=venv\python.exe -start cmd /k "set PYTHON=%PYTHON%" \ No newline at end of file diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/__init__.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/__init__.py deleted file mode 100644 index 030317a1d9a328d452bf29bc7a802e29629b1a42..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/speaker_encoder/data_objects/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset -from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader diff --git a/spaces/kevinwang676/VoiceChanger/src/facerender/sync_batchnorm/batchnorm.py b/spaces/kevinwang676/VoiceChanger/src/facerender/sync_batchnorm/batchnorm.py deleted file mode 100644 index 5f4e763f0366dffa10320116413f8c7181a8aeb1..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/src/facerender/sync_batchnorm/batchnorm.py +++ /dev/null @@ -1,315 +0,0 @@ -# -*- coding: utf-8 -*- -# File : batchnorm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of 
Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import collections - -import torch -import torch.nn.functional as F - -from torch.nn.modules.batchnorm import _BatchNorm -from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast - -from .comm import SyncMaster - -__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d'] - - -def _sum_ft(tensor): - """sum over the first and last dimention""" - return tensor.sum(dim=0).sum(dim=-1) - - -def _unsqueeze_ft(tensor): - """add new dementions at the front and the tail""" - return tensor.unsqueeze(0).unsqueeze(-1) - - -_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size']) -_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std']) - - -class _SynchronizedBatchNorm(_BatchNorm): - def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True): - super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine) - - self._sync_master = SyncMaster(self._data_parallel_master) - - self._is_parallel = False - self._parallel_id = None - self._slave_pipe = None - - def forward(self, input): - # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation. - if not (self._is_parallel and self.training): - return F.batch_norm( - input, self.running_mean, self.running_var, self.weight, self.bias, - self.training, self.momentum, self.eps) - - # Resize the input to (B, C, -1). - input_shape = input.size() - input = input.view(input.size(0), self.num_features, -1) - - # Compute the sum and square-sum. - sum_size = input.size(0) * input.size(2) - input_sum = _sum_ft(input) - input_ssum = _sum_ft(input ** 2) - - # Reduce-and-broadcast the statistics. - if self._parallel_id == 0: - mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size)) - else: - mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size)) - - # Compute the output. - if self.affine: - # MJY:: Fuse the multiplication for speed. - output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias) - else: - output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std) - - # Reshape it. - return output.view(input_shape) - - def __data_parallel_replicate__(self, ctx, copy_id): - self._is_parallel = True - self._parallel_id = copy_id - - # parallel_id == 0 means master device. - if self._parallel_id == 0: - ctx.sync_master = self._sync_master - else: - self._slave_pipe = ctx.sync_master.register_slave(copy_id) - - def _data_parallel_master(self, intermediates): - """Reduce the sum and square-sum, compute the statistics, and broadcast it.""" - - # Always using same "device order" makes the ReduceAdd operation faster. 
- # Thanks to:: Tete Xiao (http://tetexiao.com/) - intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device()) - - to_reduce = [i[1][:2] for i in intermediates] - to_reduce = [j for i in to_reduce for j in i] # flatten - target_gpus = [i[1].sum.get_device() for i in intermediates] - - sum_size = sum([i[1].sum_size for i in intermediates]) - sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce) - mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size) - - broadcasted = Broadcast.apply(target_gpus, mean, inv_std) - - outputs = [] - for i, rec in enumerate(intermediates): - outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2]))) - - return outputs - - def _compute_mean_std(self, sum_, ssum, size): - """Compute the mean and standard-deviation with sum and square-sum. This method - also maintains the moving average on the master device.""" - assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.' - mean = sum_ / size - sumvar = ssum - sum_ * mean - unbias_var = sumvar / (size - 1) - bias_var = sumvar / size - - self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data - self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data - - return mean, bias_var.clamp(self.eps) ** -0.5 - - -class SynchronizedBatchNorm1d(_SynchronizedBatchNorm): - r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a - mini-batch. - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta - - This module differs from the built-in PyTorch BatchNorm1d as the mean and - standard-deviation are reduced across all devices during training. - - For example, when one uses `nn.DataParallel` to wrap the network during - training, PyTorch's implementation normalize the tensor on each device using - the statistics only on that device, which accelerated the computation and - is also easy to implement, but the statistics might be inaccurate. - Instead, in this synchronized version, the statistics will be computed - over all training samples distributed on multiple devices. - - Note that, for one-GPU or CPU-only case, this module behaves exactly same - as the built-in PyTorch implementation. - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size C (where C is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Because the BatchNorm is done over the `C` dimension, computing statistics - on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm - - Args: - num_features: num_features from an expected input of size - `batch_size x num_features [x width]` - eps: a value added to the denominator for numerical stability. - Default: 1e-5 - momentum: the value used for the running_mean and running_var - computation. Default: 0.1 - affine: a boolean value that when set to ``True``, gives the layer learnable - affine parameters. 
Default: ``True`` - - Shape: - - Input: :math:`(N, C)` or :math:`(N, C, L)` - - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = SynchronizedBatchNorm1d(100) - >>> # Without Learnable Parameters - >>> m = SynchronizedBatchNorm1d(100, affine=False) - >>> input = torch.autograd.Variable(torch.randn(20, 100)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 2 and input.dim() != 3: - raise ValueError('expected 2D or 3D input (got {}D input)' - .format(input.dim())) - super(SynchronizedBatchNorm1d, self)._check_input_dim(input) - - -class SynchronizedBatchNorm2d(_SynchronizedBatchNorm): - r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch - of 3d inputs - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta - - This module differs from the built-in PyTorch BatchNorm2d as the mean and - standard-deviation are reduced across all devices during training. - - For example, when one uses `nn.DataParallel` to wrap the network during - training, PyTorch's implementation normalize the tensor on each device using - the statistics only on that device, which accelerated the computation and - is also easy to implement, but the statistics might be inaccurate. - Instead, in this synchronized version, the statistics will be computed - over all training samples distributed on multiple devices. - - Note that, for one-GPU or CPU-only case, this module behaves exactly same - as the built-in PyTorch implementation. - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size C (where C is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Because the BatchNorm is done over the `C` dimension, computing statistics - on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm - - Args: - num_features: num_features from an expected input of - size batch_size x num_features x height x width - eps: a value added to the denominator for numerical stability. - Default: 1e-5 - momentum: the value used for the running_mean and running_var - computation. Default: 0.1 - affine: a boolean value that when set to ``True``, gives the layer learnable - affine parameters. Default: ``True`` - - Shape: - - Input: :math:`(N, C, H, W)` - - Output: :math:`(N, C, H, W)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = SynchronizedBatchNorm2d(100) - >>> # Without Learnable Parameters - >>> m = SynchronizedBatchNorm2d(100, affine=False) - >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 4: - raise ValueError('expected 4D input (got {}D input)' - .format(input.dim())) - super(SynchronizedBatchNorm2d, self)._check_input_dim(input) - - -class SynchronizedBatchNorm3d(_SynchronizedBatchNorm): - r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch - of 4d inputs - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta - - This module differs from the built-in PyTorch BatchNorm3d as the mean and - standard-deviation are reduced across all devices during training. 
- - For example, when one uses `nn.DataParallel` to wrap the network during - training, PyTorch's implementation normalize the tensor on each device using - the statistics only on that device, which accelerated the computation and - is also easy to implement, but the statistics might be inaccurate. - Instead, in this synchronized version, the statistics will be computed - over all training samples distributed on multiple devices. - - Note that, for one-GPU or CPU-only case, this module behaves exactly same - as the built-in PyTorch implementation. - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size C (where C is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Because the BatchNorm is done over the `C` dimension, computing statistics - on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm - or Spatio-temporal BatchNorm - - Args: - num_features: num_features from an expected input of - size batch_size x num_features x depth x height x width - eps: a value added to the denominator for numerical stability. - Default: 1e-5 - momentum: the value used for the running_mean and running_var - computation. Default: 0.1 - affine: a boolean value that when set to ``True``, gives the layer learnable - affine parameters. Default: ``True`` - - Shape: - - Input: :math:`(N, C, D, H, W)` - - Output: :math:`(N, C, D, H, W)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = SynchronizedBatchNorm3d(100) - >>> # Without Learnable Parameters - >>> m = SynchronizedBatchNorm3d(100, affine=False) - >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 5: - raise ValueError('expected 5D input (got {}D input)' - .format(input.dim())) - super(SynchronizedBatchNorm3d, self)._check_input_dim(input) diff --git a/spaces/khaclinh/self-driving-anonymization/pp4av_exp.py b/spaces/khaclinh/self-driving-anonymization/pp4av_exp.py deleted file mode 100644 index 1c245ed8f45319a69e675305e7e79984c7cff702..0000000000000000000000000000000000000000 --- a/spaces/khaclinh/self-driving-anonymization/pp4av_exp.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. 
- -import os - -from yolox.exp import Exp as MyExp - -class Exp(MyExp): - def __init__(self): - super(Exp, self).__init__() - self.depth = 1.0 # indicate size yolo model - self.width = 1.0 # - self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] - - self.data_dir = '' - self.train_ann = '' - self.val_ann = '' - self.test_ann = '' - - self.num_classes = 2 - self.data_num_workers = 32 # number of cpu for splitting batch - - self.input_size = (800, 800) - self.print_interval = 100 - self.eval_interval = 1 - self.test_size = (800, 800) - self.enable_mixup = True - self.mosaic_scale = (0.5, 1.5) - self.max_epoch = 300 - self.hsv_prob = 1.0 - - self.degrees = 20.0 - self.translate = 0.2 - self.shear = 2.0 - # Turn off mosaic - self.mosaic_prob = 1.0 - # Turn off Mixup - self.mixup_prob = 1.0 - # Change SGD by ADAM - - - self.basic_lr_per_img = 0.01 / 28.0 - self.no_aug_epochs = 15 - self.min_lr_ratio = 0.05 - self.ema = True - - self.nmsthre = 0.3 diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/image.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/image.py deleted file mode 100644 index 61a56c75b67f593c298408462c63c0468be8e276..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/image.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import cv2 -import numpy as np - -from annotator.uniformer.mmcv.image import imread, imwrite -from .color import color_val - - -def imshow(img, win_name='', wait_time=0): - """Show an image. - - Args: - img (str or ndarray): The image to be displayed. - win_name (str): The window name. - wait_time (int): Value of waitKey param. - """ - cv2.imshow(win_name, imread(img)) - if wait_time == 0: # prevent from hanging if windows was closed - while True: - ret = cv2.waitKey(1) - - closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1 - # if user closed window or if some key pressed - if closed or ret != -1: - break - else: - ret = cv2.waitKey(wait_time) - - -def imshow_bboxes(img, - bboxes, - colors='green', - top_k=-1, - thickness=1, - show=True, - win_name='', - wait_time=0, - out_file=None): - """Draw bboxes on an image. - - Args: - img (str or ndarray): The image to be displayed. - bboxes (list or ndarray): A list of ndarray of shape (k, 4). - colors (list[str or tuple or Color]): A list of colors. - top_k (int): Plot the first k bboxes only if set positive. - thickness (int): Thickness of lines. - show (bool): Whether to show the image. - win_name (str): The window name. - wait_time (int): Value of waitKey param. - out_file (str, optional): The filename to write the image. - - Returns: - ndarray: The image with bboxes drawn on it. 
- """ - img = imread(img) - img = np.ascontiguousarray(img) - - if isinstance(bboxes, np.ndarray): - bboxes = [bboxes] - if not isinstance(colors, list): - colors = [colors for _ in range(len(bboxes))] - colors = [color_val(c) for c in colors] - assert len(bboxes) == len(colors) - - for i, _bboxes in enumerate(bboxes): - _bboxes = _bboxes.astype(np.int32) - if top_k <= 0: - _top_k = _bboxes.shape[0] - else: - _top_k = min(top_k, _bboxes.shape[0]) - for j in range(_top_k): - left_top = (_bboxes[j, 0], _bboxes[j, 1]) - right_bottom = (_bboxes[j, 2], _bboxes[j, 3]) - cv2.rectangle( - img, left_top, right_bottom, colors[i], thickness=thickness) - - if show: - imshow(img, win_name, wait_time) - if out_file is not None: - imwrite(img, out_file) - return img - - -def imshow_det_bboxes(img, - bboxes, - labels, - class_names=None, - score_thr=0, - bbox_color='green', - text_color='green', - thickness=1, - font_scale=0.5, - show=True, - win_name='', - wait_time=0, - out_file=None): - """Draw bboxes and class labels (with scores) on an image. - - Args: - img (str or ndarray): The image to be displayed. - bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or - (n, 5). - labels (ndarray): Labels of bboxes. - class_names (list[str]): Names of each classes. - score_thr (float): Minimum score of bboxes to be shown. - bbox_color (str or tuple or :obj:`Color`): Color of bbox lines. - text_color (str or tuple or :obj:`Color`): Color of texts. - thickness (int): Thickness of lines. - font_scale (float): Font scales of texts. - show (bool): Whether to show the image. - win_name (str): The window name. - wait_time (int): Value of waitKey param. - out_file (str or None): The filename to write the image. - - Returns: - ndarray: The image with bboxes drawn on it. 
- """ - assert bboxes.ndim == 2 - assert labels.ndim == 1 - assert bboxes.shape[0] == labels.shape[0] - assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5 - img = imread(img) - img = np.ascontiguousarray(img) - - if score_thr > 0: - assert bboxes.shape[1] == 5 - scores = bboxes[:, -1] - inds = scores > score_thr - bboxes = bboxes[inds, :] - labels = labels[inds] - - bbox_color = color_val(bbox_color) - text_color = color_val(text_color) - - for bbox, label in zip(bboxes, labels): - bbox_int = bbox.astype(np.int32) - left_top = (bbox_int[0], bbox_int[1]) - right_bottom = (bbox_int[2], bbox_int[3]) - cv2.rectangle( - img, left_top, right_bottom, bbox_color, thickness=thickness) - label_text = class_names[ - label] if class_names is not None else f'cls {label}' - if len(bbox) > 4: - label_text += f'|{bbox[-1]:.02f}' - cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2), - cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color) - - if show: - imshow(img, win_name, wait_time) - if out_file is not None: - imwrite(img, out_file) - return img diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/__init__.py deleted file mode 100644 index a3e6208634fafa416b9323f5156ac56dd7bb3700..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .semver_match import ( - ThemeAsset, - get_matching_version, - get_theme_assets, -) - -__all__ = [ - "ThemeAsset", - "get_theme_assets", - "get_matching_version", -] diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/fonts.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/fonts.py deleted file mode 100644 index d51dbbfdf4990358e9094cc887c47ae6cd8b0440..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/themes/utils/fonts.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import annotations - -import json -from typing import Iterable - - -class FontEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, Font): - return { - "__gradio_font__": True, - "name": obj.name, - "class": "google" if isinstance(obj, GoogleFont) else "font", - } - # Let the base class default method raise the TypeError - return json.JSONEncoder.default(self, obj) - - -def as_font(dct): - if "__gradio_font__" in dct: - name = dct["name"] - return GoogleFont(name) if dct["class"] == "google" else Font(name) - return dct - - -class Font: - def __init__(self, name: str): - self.name = name - - def __str__(self) -> str: - return ( - self.name - if self.name in ["sans-serif", "serif", "monospace", "cursive", "fantasy"] - else f"'{self.name}'" - ) - - def stylesheet(self) -> str: - return None - - def __eq__(self, other: Font) -> bool: - return self.name == other.name and self.stylesheet() == other.stylesheet() - - -class GoogleFont(Font): - def __init__(self, name: str, weights: Iterable[int] = (400, 600)): - self.name = name - self.weights = weights - - def stylesheet(self) -> str: - return f'https://fonts.googleapis.com/css2?family={self.name.replace(" ", "+")}:wght@{";".join(str(weight) for weight in self.weights)}&display=swap' diff --git 
a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py deleted file mode 100644 index f8c40889bce7ec9b9645011b5e2ee8db37464b6a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/axes/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -from . import _base -from ._axes import * - -# Backcompat. -from ._axes import Axes as Subplot - - -class _SubplotBaseMeta(type): - def __instancecheck__(self, obj): - return (isinstance(obj, _base._AxesBase) - and obj.get_subplotspec() is not None) - - -class SubplotBase(metaclass=_SubplotBaseMeta): - pass - - -def subplot_class_factory(cls): return cls diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/benchmark.py b/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/benchmark.py deleted file mode 100644 index 46475a088b0eca137f641935d58dbf4b8d50ed29..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/benchmark.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -This module implements a benchmark function to evaluate the performance of the embedding pipeline. It expects a configuration JSON file. It must have questions and expected retrieved text. -For each question, it's essential to have variants of that question. Language is fluid and each person might have their own spin on how they may ask it. - -At the end, it will save the results inside a benchmark_{sysdate}.txt file in the main directory. - -The benchmark function will return the score as an integer. -""" -import datetime -import json -import os - -from pathlib import Path - -from .data_processor import process_and_add_to_collector, preprocess_text -from .parameters import get_chunk_count, get_max_token_count -from .utils import create_metadata_source - -def benchmark(config_path, collector): - # Get the current system date - sysdate = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"benchmark_{sysdate}.txt" - - # Open the log file in append mode - with open(filename, 'a') as log: - with open(config_path, 'r') as f: - data = json.load(f) - - total_points = 0 - max_points = 0 - - for item in data: - filepath = item["text"] - corpus = "" - - # Check if the file exists - if os.path.isfile(Path(filepath)): - # Open the file and read its content - with open(Path(filepath), 'r') as file: - corpus = file.read() - process_and_add_to_collector(corpus, collector, True, create_metadata_source('benchmark')) - else: - raise f'Cannot find specified file {filepath}.' - - for question_group in item["questions"]: - question_variants = question_group["question_variants"] - criteria = question_group["criteria"] - - for q in question_variants: - max_points += len(criteria) - processed_text = preprocess_text(q) - - # Get the most similar chunks - results = collector.get_sorted_by_dist(processed_text, n_results=get_chunk_count(), max_token_count=get_max_token_count()) - - points = 0 - - for c in criteria: - for p in results: - if c in p: - points += 1 - total_points += 1 - break - - info = f"The question '{q}' scored {points}/{len(criteria)} points." 
- print(info, file=log) - - print('\n---\n', file=log) - - print(f'##Total points:\n\n{total_points}/{max_points}', file=log) - - return total_points, max_points \ No newline at end of file diff --git a/spaces/leopoldmaillard/ImageRetrieval/README.md b/spaces/leopoldmaillard/ImageRetrieval/README.md deleted file mode 100644 index 31be0ff5aabc5e21cdc576947e403e27f9d09d39..0000000000000000000000000000000000000000 --- a/spaces/leopoldmaillard/ImageRetrieval/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image_Retrieval -emoji: 🐠 -colorFrom: purple -colorTo: pink -sdk: gradio -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/lewiswu1209/MockingBird/vocoder/fregan/loss.py b/spaces/lewiswu1209/MockingBird/vocoder/fregan/loss.py deleted file mode 100644 index e37dc64e29446ecdd9dce03290f4e0eba58fb3d7..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/vocoder/fregan/loss.py +++ /dev/null @@ -1,35 +0,0 @@ -import torch - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss*2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2012 Indir 64 Bit Gezginler Linkliste Grafikbesc.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2012 Indir 64 Bit Gezginler Linkliste Grafikbesc.md deleted file mode 100644 index daad619f98fc017851a10427dc09f559192319ee..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Autocad 2012 Indir 64 Bit Gezginler Linkliste Grafikbesc.md +++ /dev/null @@ -1,6 +0,0 @@ -

Autocad 2012 Indir 64 Bit Gezginler linkliste grafikbesc


Download Zip ✺✺✺ https://bytlly.com/2uGwKO



-
-I can provide you with download links for AutoCAD 2012 but I have to mention that AutoCAD ... AutoCAD 2012 32-bits · AutoCAD 2012 64-bits. 4d29de3e1b
-
-
-

diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/BusyWin-14-Rel-2-0-With-Patch.md b/spaces/lincquiQcaudo/Top-20-Diffusion/BusyWin-14-Rel-2-0-With-Patch.md deleted file mode 100644 index f801cbc3d5628490430d3f94578b7416bd16fe02..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/BusyWin-14-Rel-2-0-With-Patch.md +++ /dev/null @@ -1,10 +0,0 @@ -

BusyWin-14-Rel-2-0-With-Patch


DOWNLOAD ⚙⚙⚙ https://bytlly.com/2uGyya



-
-When using PaperCut NG/MF version 21.1.2 or higher with the hidden document names feature enabled in the print provider, print jobs submitted for release may hang. -To resolve this issue, click Cancel to cancel the job, and then try printing elsewhere. -(See also the printer's User's Guide for more information about resolving this issue.) -Elimination of possible causes -1. Check connected printers. 8a78ff9644
-
-
-

diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Captaintsubasa3hackdownload.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Captaintsubasa3hackdownload.md deleted file mode 100644 index 02c5e76d7f674ada4172a2a3fa3e5dc09c01dc35..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Captaintsubasa3hackdownload.md +++ /dev/null @@ -1,24 +0,0 @@ -
-

How to Download Captain Tsubasa 3 Hack for SNES

-

Captain Tsubasa 3 is a soccer game based on the popular manga and anime series of the same name. The game features many characters and teams from the original story, as well as some new ones. The game is known for its fast-paced and exciting gameplay, as well as its special moves and animations.

-

However, some fans of the game have created a hack version of Captain Tsubasa 3 that modifies some aspects of the original game, such as the difficulty level, the stats of the players, the graphics, and the music. The hack version also adds some new features, such as a save function, a practice mode, and a custom team editor.

-

captaintsubasa3hackdownload


Downloadhttps://bytlly.com/2uGy7A



-

If you want to try out this hack version of Captain Tsubasa 3, you will need a few things:

-
    -
  • A SNES emulator that can run ROM files. There are many emulators available online for different platforms, such as Windows, Mac, Android, and iOS. Some popular ones are ZSNES, Snes9x, and RetroArch.
  • -
  • A ROM file of Captain Tsubasa 3. You can find this file online by searching for "captain tsubasa 3 rom". Make sure you download it from a reliable source and scan it for viruses before opening it.
  • -
  • A patch file of Captain Tsubasa 3 Hack. You can find this file online by searching for "captain tsubasa 3 hack download". One source is [^1^], where you can download the file named "Captain Tsubasa 3 SNES First Hack by tommy 2017.smc". Another source is [^2^], where you can download the file named "captaintsubasa3hackdownload.pdf".
  • -
  • A patching program that can apply the patch file to the ROM file. There are many patching programs available online, such as Lunar IPS, Flips, and MultiPatch.
  • -
-

Once you have all these things, you can follow these steps to download and play Captain Tsubasa 3 Hack:

-
    -
  1. Open your patching program and select the patch file and the ROM file. Make sure they share the same base name (for example, "Captain Tsubasa 3.ips" and "Captain Tsubasa 3.smc").
  2. -
  3. Click on "Apply Patch" or "Patch" or whatever option your program has. This will create a new ROM file with the hack applied to it (a scripted alternative to this step is sketched just after this list).
  4. -
  5. Open your emulator and load the new ROM file. You should see a title screen that says "Captain Tsubasa 3 Hack" or something similar.
  6. -
  7. Enjoy playing Captain Tsubasa 3 Hack!
  8. -
-
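The GUI steps above can also be scripted when the hack is distributed as a standard .ips patch rather than as the pre-patched .smc file mentioned earlier. Below is a minimal, illustrative Python sketch of an IPS patcher; the filenames are placeholders rather than the actual download names, and the rare "EOF"-as-offset corner case of the format is ignored.

```python
def apply_ips(rom_path: str, patch_path: str, out_path: str) -> None:
    """Apply a standard IPS patch to a ROM file (minimal sketch)."""
    rom = bytearray(open(rom_path, "rb").read())
    patch = open(patch_path, "rb").read()
    if patch[:5] != b"PATCH":
        raise ValueError("not an IPS patch")
    i = 5
    while patch[i:i + 3] != b"EOF":
        offset = int.from_bytes(patch[i:i + 3], "big")    # 3-byte target offset
        size = int.from_bytes(patch[i + 3:i + 5], "big")  # 2-byte payload size
        i += 5
        if size:                                          # plain record: raw bytes to copy
            data = patch[i:i + size]
            i += size
        else:                                             # RLE record: one byte repeated
            run = int.from_bytes(patch[i:i + 2], "big")
            data = patch[i + 2:i + 3] * run
            i += 3
        end = offset + len(data)
        if end > len(rom):                                # grow the ROM if the patch writes past its end
            rom.extend(b"\x00" * (end - len(rom)))
        rom[offset:end] = data
    with open(out_path, "wb") as f:
        f.write(rom)

# Placeholder filenames -- substitute whatever you actually downloaded.
apply_ips("Captain Tsubasa 3.smc", "Captain Tsubasa 3 Hack.ips", "Captain Tsubasa 3 Hack.smc")
```

If the file you downloaded is already a patched .smc ROM (as with the "First Hack by tommy" release mentioned above), no patching is needed; just load it directly in your emulator.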

Note: This article is for informational purposes only. We do not condone or encourage piracy or illegal downloading of any kind. Please support the original creators of Captain Tsubasa 3 by buying their game legally.

If you want to learn more about Captain Tsubasa 3 Hack, you can check out some online videos and reviews that showcase the game and its features. For example, you can watch a gameplay video in which the user Tommy2 demonstrates some of the hack's special moves, or read a short write-up in which the user Captaintsubasa3hackdownload gives a brief overview of the game and its download link.

-

-

Captain Tsubasa 3 Hack is a fun and challenging game for fans of Captain Tsubasa and soccer games in general. It offers a new and improved experience of the original game, with more options and customization. If you are looking for a way to spice up your Captain Tsubasa 3 gameplay, you might want to give this hack a try.

d5da3c52bf
-
-
\ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Kanchana 3 PATCHED Full Movie In Tamil Hd 1080p.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Kanchana 3 PATCHED Full Movie In Tamil Hd 1080p.md deleted file mode 100644 index d1860602ce39dee8e6ea552e32dd16dc7a765bf7..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Kanchana 3 PATCHED Full Movie In Tamil Hd 1080p.md +++ /dev/null @@ -1,114 +0,0 @@ -
-

Kanchana 3: A Spooky and Funny Ride with Raghava Lawrence and Co.

- -

If you are a fan of Tamil horror comedy movies, you must have heard of Kanchana 3, the fourth installment in the Muni series. Kanchana 3 is a 2019 movie that was co-produced, written and directed by Raghava Lawrence, who also played the dual role of Raghava and Kaali. The movie also starred Oviya, Vedhika, Nikki Tamboli and Ri Djavi Alexandra as the female leads, while Kovai Sarala, Soori, Tarun Arora and Kabir Duhan Singh played the supporting roles.

- -

Kanchana 3 follows the story of Raghava, a young man who gets easily scared by ghosts and spirits. He lives with his mother (Kovai Sarala), brother (Sriman) and sister-in-law (Devadarshini). One day, he goes to his ancestral home with his family and his girlfriend Priya (Oviya). There, he meets his grandfather (Delhi Ganesh), who tells him about his past as Kaali, a powerful leader who fought against a corrupt politician (Tarun Arora) and his henchman Bhavani (Kabir Duhan Singh). Kaali was killed by Bhavani along with his wife (Vedhika) and his lover (Nikki Tamboli). However, their spirits remained in the house, waiting for revenge. Raghava gets possessed by Kaali's spirit and decides to take on Bhavani and his men.

-

kanchana 3 full movie in tamil hd 1080p


Download ○○○ https://bytlly.com/2uGvzP



- -

Kanchana 3 is a typical horror comedy movie that has all the elements of the genre: jump scares, comedy scenes, songs, dances, fights and sentiments. The movie is full of entertainment and fun for the audience who enjoy this kind of movies. The movie also has a social message about women empowerment and corruption. The movie was well received by the fans and critics alike, and became a huge commercial success at the box office.

- -

How to Watch Kanchana 3 Full Movie in Tamil HD 1080p

- -

If you want to watch Kanchana 3 full movie in Tamil HD 1080p, you have several options to choose from. You can either watch it online or download it to your device. Here are some of the ways you can watch Kanchana 3 full movie in Tamil HD 1080p:

- -
    -
  • Watch it online on streaming platforms: You can watch Kanchana 3 full movie online on various streaming platforms such as Zee5, Sun Nxt and VI movies and tv. You just need to have a subscription or a membership to access these platforms. You can also watch it on Google Play Movies if you want to rent or buy it.
  • -
  • Download it from torrent sites: You can also download Kanchana 3 full movie from torrent sites such as Tamilyogi HD, TamilGun and TamilRockers. However, this is not a legal or safe option as you may face legal issues or malware threats. You should avoid downloading movies from torrent sites as much as possible.
  • -
  • Watch it on TV channels: You can also watch Kanchana 3 full movie on TV channels such as Sun TV, Zee Tamil and Star Vijay. You just need to check the schedule of these channels and tune in at the right time. You can also record the movie if you have a DVR or a set-top box.
  • -
- -

These are some of the ways you can watch Kanchana 3 full movie in Tamil HD 1080p. However, you should always respect the rights of the creators and producers of the movie and watch it legally and ethically.

-

How to Enjoy Kanchana 3 Full Movie in Tamil HD 1080p

- -

Kanchana 3 is a movie that can be enjoyed by anyone who loves horror comedy movies. The movie has a lot of elements that can make you laugh, scream and cheer. Here are some tips on how to enjoy Kanchana 3 full movie in Tamil HD 1080p:

- -
    -
  • Watch it with your friends or family: Kanchana 3 is a movie that is best enjoyed with your loved ones. You can share the fun and excitement of the movie with them and have a great time together. You can also discuss the movie after watching it and share your opinions and feedback.
  • -
  • Watch it with good sound and picture quality: Kanchana 3 is a movie that has a lot of visual and audio effects that can enhance your viewing experience. You should watch it with good sound and picture quality to appreciate the movie better. You can use headphones, speakers, or a home theater system to get the best sound quality. You can also use a big screen, a projector, or a smart TV to get the best picture quality.
  • -
  • Watch it with an open mind: Kanchana 3 is a movie that has a lot of twists and turns that can surprise you. You should watch it with an open mind and not expect anything from the movie. You should also not take the movie too seriously or too literally. You should just enjoy the movie for what it is: a horror comedy entertainer.
  • -
- -

These are some of the ways you can enjoy Kanchana 3 full movie in Tamil HD 1080p. You can also watch it again if you liked it or recommend it to others if you loved it.

-

How to Review Kanchana 3 Full Movie in Tamil HD 1080p

- -

If you have watched Kanchana 3 full movie in Tamil HD 1080p and you want to share your opinion and feedback about it, you can write a review of the movie. A review is a personal and critical evaluation of a movie that can help other people decide whether to watch it or not. Here are some tips on how to write a review of Kanchana 3 full movie in Tamil HD 1080p:

- -
    -
  • Introduce the movie: You should start your review by introducing the movie, its title, genre, director, cast and plot summary. You should also mention when and where you watched the movie and what your expectations were.
  • -
  • Analyze the movie: You should then analyze the movie, its strengths and weaknesses, its themes and messages, its technical aspects and its entertainment value. You should support your analysis with examples and evidence from the movie.
  • -
  • Evaluate the movie: You should then evaluate the movie, its overall quality, its impact and its relevance. You should also compare it with other movies of the same genre or series. You should give your personal rating or recommendation of the movie.
  • -
  • Conclude the review: You should end your review by summarizing your main points and giving your final verdict of the movie. You should also invite your readers to share their comments or questions about the movie.
  • -
- -

These are some of the ways you can write a review of Kanchana 3 full movie in Tamil HD 1080p. You can also read other reviews of the movie online or offline to get some inspiration and ideas.

- -

How to Enjoy More Movies Like Kanchana 3 Full Movie in Tamil HD 1080p

- -

If you enjoyed watching Kanchana 3 full movie in Tamil HD 1080p and you want to watch more movies like it, you have many options to choose from. You can either watch the previous movies of the Muni series or watch other horror comedy movies from Tamil cinema or other industries. Here are some of the movies you can watch if you liked Kanchana 3 full movie in Tamil HD 1080p:

- -
    -
  • Muni (2007): The first movie of the Muni series that introduced Raghava Lawrence as Raghava, a young man who gets possessed by a ghost named Muni.
  • -
  • Kanchana (2011): The second movie of the Muni series that featured Raghava Lawrence as Raghava and Kaali, a transgender woman who seeks revenge for her murder.
  • -
  • Kanchana 2 (2015): The third movie of the Muni series that starred Raghava Lawrence as Raghava and Shiva, a TV cameraman who gets haunted by a ghost named Naga.
  • -
  • Dhilluku Dhuddu (2016): A horror comedy movie that starred Santhanam as Kumar, a happy-go-lucky man who falls in love with a girl whose father is a ghost hunter.
  • -
  • Devi (2016): A horror comedy movie that starred Prabhu Deva as Krishna Kumar, a man who gets married to a woman who is possessed by an actress named Ruby.
  • -
  • Zombie (2019): A horror comedy movie that starred Yogi Babu as Mario, a security guard who gets trapped in a resort with his friends during a zombie outbreak.
  • -
- -

These are some of the movies you can watch if you enjoyed Kanchana 3 full movie in Tamil HD 1080p. You can also explore other movies of different genres and languages that can entertain you and make you laugh.

-

How to Learn More About Kanchana 3 Full Movie in Tamil HD 1080p

- -

If you want to learn more about Kanchana 3 full movie in Tamil HD 1080p, you can do some research online or offline. You can find more information about the movie, its cast and crew, its production and release, its trivia and facts, its awards and nominations, its reviews and ratings, and its fan reactions and feedback. Here are some of the sources you can use to learn more about Kanchana 3 full movie in Tamil HD 1080p:

- -
    -
  • Wikipedia: You can read the Wikipedia page of Kanchana 3 full movie in Tamil HD 1080p to get a comprehensive overview of the movie, its plot, its cast and crew, its box office performance, its reception and its legacy.
  • -
  • IMDb: You can visit the IMDb page of Kanchana 3 full movie in Tamil HD 1080p to get more details about the movie, such as its genre, its runtime, its release date, its language, its country of origin, its budget and gross, its soundtrack and score, its technical specifications and its trivia and goofs.
  • -
  • YouTube: You can watch the YouTube videos of Kanchana 3 full movie in Tamil HD 1080p to see the trailer, the songs, the scenes, the interviews, the behind-the-scenes footage and the fan-made videos of the movie.
  • -
  • Social media: You can follow the social media accounts of Kanchana 3 full movie in Tamil HD 1080p to get the latest updates, news, photos and videos of the movie. You can also interact with other fans and share your opinions and feedback about the movie.
  • -
  • Books and magazines: You can read the books and magazines that feature Kanchana 3 full movie in Tamil HD 1080p to get more insights and perspectives about the movie. You can also learn about the history and culture of Tamil cinema and horror comedy genre.
  • -
- -

These are some of the ways you can learn more about Kanchana 3 full movie in Tamil HD 1080p. You can also explore other sources that can enrich your knowledge and understanding of the movie.

- -

Conclusion

- -

Kanchana 3 is a horror comedy movie that was released in 2019. It was co-produced, written and directed by Raghava Lawrence, who also played the dual role of Raghava and Kaali. The movie also starred Oviya, Vedhika, Nikki Tamboli and Ri Djavi Alexandra as the female leads, while Kovai Sarala, Soori, Tarun Arora and Kabir Duhan Singh played the supporting roles.

- -

Kanchana 3 follows the story of Raghava, a young man who gets easily scared by ghosts and spirits. He goes to his ancestral home with his family and his girlfriend Priya. There, he meets his grandfather, who tells him about his past as Kaali, a powerful leader who fought against a corrupt politician and his henchman Bhavani. Kaali was killed by Bhavani along with his wife and his lover. However, their spirits remained in the house, waiting for revenge. Raghava gets possessed by Kaali's spirit and decides to take on Bhavani and his men.

- -

Kanchana 3 is a typical horror comedy movie that has all the elements of the genre: jump scares, comedy scenes, songs, dances, fights and sentiments. The movie is full of entertainment and fun for the audience who enjoy this kind of movies. The movie also has a social message about women empowerment and corruption. The movie was well received by the fans and critics alike, and became a huge commercial success at the box office.

- -

If you want to watch Kanchana 3 full movie in Tamil HD 1080p, you have several options to choose from. You can either watch it online or download it to your device. You can also watch it on TV channels if you prefer. However, you should always respect the rights of the creators and producers of the movie and watch it legally and ethically.

- -

If you enjoyed watching Kanchana 3 full movie in Tamil HD 1080p and you want to watch more movies like it or learn more about it, you have many options to choose from. You can either watch the previous movies of the Muni series or watch other horror comedy movies from Tamil cinema or other industries. You can also do some research online or offline to find more information about the movie.

- -

Kanchana 3 is a movie that can be enjoyed by anyone who loves horror comedy movies. It is a spooky and funny ride with Raghava Lawrence and co. Don't miss it!

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 7 Version BETTER Free Download 32 Bit Full Crack.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 7 Version BETTER Free Download 32 Bit Full Crack.md deleted file mode 100644 index d6235e3850e83f79dd6552942bd780ca046be676..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 7 Version BETTER Free Download 32 Bit Full Crack.md +++ /dev/null @@ -1,136 +0,0 @@ -
-

MATLAB 2007 for Windows 32-bit: How to Download and Crack It for Free

- -

If you are looking for a powerful and reliable software for engineering and scientific applications, you might want to consider MATLAB. MATLAB is a programming and numeric computing platform that allows you to plot functions and data, manipulate matrices, run parallel algorithms, create models for control loops, and perform many other tasks. MATLAB has many versions and updates, but one of the most stable and widely used ones is MATLAB 2007.

-

matlab 7 version free download 32 bit full crack


DOWNLOAD >>>>> https://bytlly.com/2uGwUm



- -

MATLAB 2007 is compatible with Windows XP, Vista, 7, and 8, and it does not require a lot of memory or disk space. It also has many features and improvements that make it a great choice for Windows 32-bit users. However, MATLAB 2007 is not free, and you need to pay a fee or subscription to use it. But don't worry, because in this article, we will show you how to download and crack MATLAB 2007 for Windows 32-bit for free, without any limitations or restrictions.

- -

What are the Features and Improvements of MATLAB 2007?

- -

MATLAB 2007 has many features and improvements that make it a powerful and reliable tool for engineering and scientific applications. Some of them are:

- -
    -
  • Distributed Computing Toolbox: This toolbox lets you run parallel algorithms in up to four local MATLAB sessions on your desktop, using your multicore processor or networked computers.
  • -
  • Control System Toolbox: This toolbox enables you to create exact models for control loops, using linear and nonlinear methods. You can also design and tune controllers, analyze system performance, and simulate dynamic systems.
  • -
  • Categorical and Dataset Arrays: These data types allow you to organize and manipulate statistical data in a convenient way. You can perform operations such as grouping, sorting, filtering, merging, and plotting on categorical and dataset arrays.
  • -
  • New Functions: MATLAB 2007 introduces many new functions that support the categorical and dataset arrays, as well as other tasks. For example, cholcov function computes the Cholesky-like decomposition of a covariance matrix, linehyptest performs the linear hypothesis test, and ranksum performs the Wilcoxon rank sum test.
  • -
- -

How to Download MATLAB 2007 for Windows 32-bit?

- -

To download MATLAB 2007 for Windows 32-bit, you need to follow these steps:

- -
    -
  1. Go to this link to access the download page of MATLAB 2007 Full ISO Setup.
  2. -
  3. Click on the green button that says "Download Now".
  4. -
  5. Wait for the download to complete. The file size is about 3 GB.
  6. -
  7. Save the file on your computer. The file name is Matlab_R2007B_Full_Setup.iso.
  8. -
- -

How to Install MATLAB 2007 for Windows 32-bit?

- -

To install MATLAB 2007 for Windows 32-bit, you need to follow these steps:

- -
    -
  1. Mount the ISO file using a virtual drive software such as Daemon Tools or PowerISO.
  2. -
  3. Open the mounted drive and run the setup.exe file.
  4. -
  5. Follow the instructions on the screen to complete the installation process.
  6. -
  7. Do not launch MATLAB after the installation is finished.
  8. -
- -

How to Crack MATLAB 2007 for Windows 32-bit?

- -

To crack MATLAB 2007 for Windows 32-bit, you need to follow these steps:

-

- -
    -
  1. Download the crack file from this link.
  2. -
  3. Extract the zip file using a software such as WinRAR or 7-Zip.
  4. -
  5. Copy the file named "libmwservices.dll" from the extracted folder.
  6. -
  7. Paste the file in the installation directory of MATLAB 2007. The default location is C:\Program Files\MATLAB\R2007b\bin\win32.
  8. -
  9. Replace the existing file when prompted.
  10. -
  11. Launch MATLAB from the desktop shortcut or start menu.
  12. -
- -

Congratulations! You have successfully installed and cracked MATLAB 2007 for Windows 32-bit. You can now enjoy the full version of MATLAB 2007 without any limitations or restrictions.

- -

Conclusion

- -

MATLAB 2007 is great software for engineering and scientific applications. It has many features and improvements that make it a powerful and reliable tool. If you want to get MATLAB 2007 for Windows 32-bit with full crack, you can follow the steps in this article. We hope this article was helpful and informative. If you have any questions or comments, feel free to leave them below.

-

What are the Challenges and Risks of Using MATLAB 2007 for Windows 32-bit?

- -

While MATLAB 2007 for Windows 32-bit has many benefits, it also has some challenges and risks that you should be aware of. Some of them are:

- -
    -
  • Compatibility Issues: MATLAB 2007 may not be compatible with some newer versions of Windows, such as Windows 10. It may also not work well with some newer hardware and software, such as graphics cards and drivers. You may need to update or downgrade some components to make MATLAB 2007 work properly.
  • -
  • Security Issues: MATLAB 2007 may not have the latest security patches and updates, which may make it vulnerable to viruses, malware, and hackers. You should use a reliable antivirus program and firewall to protect your system and data.
  • -
  • Legal Issues: MATLAB 2007 is licensed software that requires a paid license or subscription to use. Downloading and cracking MATLAB 2007 for free violates MathWorks' terms and conditions and may result in legal action or penalties. If you choose to do so anyway, it is entirely at your own risk and responsibility.
  • -
- -

How to Update or Upgrade MATLAB 2007 for Windows 32-bit?

- -

If you want to update or upgrade MATLAB 2007 for Windows 32-bit, you have two options:

- -
    -
  1. Update MATLAB 2007: You can check for updates and patches for MATLAB 2007 by using the Help menu in MATLAB. You can also visit the official website of MATLAB and Mathworks to download the latest updates and patches.
  2. -
  3. Upgrade MATLAB 2007: You can upgrade MATLAB 2007 to a newer version of MATLAB by purchasing a license or subscription from Mathworks. You can also download a trial version of the latest MATLAB from the official website of MATLAB and Mathworks.
  4. -
- -

Updating or upgrading MATLAB 2007 may improve its performance, compatibility, security, and functionality. However, it may also require more system resources, change some features or functions, and invalidate your crack file.

-

What are the Alternatives to MATLAB 2007 for Windows 32-bit?

- -

If you are looking for alternatives to MATLAB 2007 for Windows 32-bit, you have several options. Some of them are:

- -
    -
  • Octave: Octave is free and open-source software that is largely compatible with MATLAB. It can perform numerical computations, plot graphs, and run many scripts and functions written in MATLAB. However, Octave does not have all the features and toolboxes of MATLAB, and there are some syntax differences.
  • -
  • Scilab: Scilab is another free and open-source package that is similar to MATLAB. It can perform matrix operations, data analysis, signal processing, optimization, and simulation, and it has a graphical user interface and a rich set of toolboxes. However, Scilab is not fully compatible with MATLAB, and it may have some performance issues.
  • -
  • Python: Python is a general-purpose programming language that is widely used for scientific computing. Libraries such as NumPy, SciPy, Matplotlib, Pandas, and TensorFlow cover most of the tasks that MATLAB handles, as shown in the short sketch after this list. However, Python may require more coding and debugging than MATLAB, and managing package versions and dependencies can take some effort.
  • -
- -
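
If you want a concrete feel for the Python route, here is a minimal sketch of MATLAB-style matrix work and plotting using NumPy and Matplotlib. It assumes both packages are installed (for example via pip), and the matrix values and output file name are made up purely for illustration; it is not tied to any specific MATLAB script mentioned in this article.

```python
# Minimal sketch: solve a linear system and plot a function,
# the kind of task MATLAB users typically do. Values are illustrative only.
import numpy as np
import matplotlib.pyplot as plt

# Solve A @ x = b, the equivalent of x = A\b in MATLAB.
A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = np.linalg.solve(A, b)
print("solution:", x)

# Plot a function, the equivalent of MATLAB's plot().
t = np.linspace(0, 2 * np.pi, 200)
plt.plot(t, np.sin(t), label="sin(t)")
plt.xlabel("t")
plt.ylabel("sin(t)")
plt.legend()
plt.savefig("sine.png")  # or plt.show() for an interactive window
```

Unlike a cracked copy of MATLAB, these packages are free to install and use, so they carry none of the legal and security risks discussed earlier in this article.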

How to Learn MATLAB 2007 for Windows 32-bit?

- -

If you want to learn MATLAB 2007 for Windows 32-bit, you have several resources available. Some of them are:

- -
    -
  • MATLAB Help: MATLAB has a built-in help system that provides documentation and examples for all the commands and functions of MATLAB. You can access it by using the Help menu in MATLAB or by typing help or doc in the Command Window.
  • -
  • MATLAB Tutorials: MATLAB has a set of tutorials that cover the basics of MATLAB, such as variables, operators, arrays, loops, functions, plots, and GUIs. You can access them by using the Help menu in MATLAB or by visiting the official website of MATLAB and Mathworks.
  • -
  • MATLAB Books: There are many books that teach you how to use MATLAB for various purposes, such as engineering, mathematics, statistics, machine learning, image processing, and more. You can find them online or in your local library or bookstore.
  • -
  • MATLAB Courses: There are many online courses that offer interactive lessons and exercises on how to use MATLAB for different applications. You can find them on platforms such as Coursera, edX, Udemy, Khan Academy, and more.
  • -
-

How to Uninstall MATLAB 2007 for Windows 32-bit?

- -

If you want to uninstall MATLAB 2007 for Windows 32-bit, you need to follow these steps:

- -
    -
  1. Go to the Control Panel and select Programs and Features.
  2. -
  3. Find MATLAB 2007 in the list of installed programs and click on Uninstall.
  4. -
  5. Follow the instructions on the screen to complete the uninstallation process.
  6. -
  7. Delete the MATLAB 2007 folder from your computer. The default location is C:\Program Files\MATLAB\R2007b.
  8. -
  9. Delete the crack file from your computer. The file name is libmwservices.dll.
  10. -
- -

Uninstalling MATLAB 2007 will free up some disk space and memory on your system. However, it will also remove all the features and functions of MATLAB 2007 from your system.

- -

How to Troubleshoot MATLAB 2007 for Windows 32-bit?

- -

If you encounter any problems or errors while using MATLAB 2007 for Windows 32-bit, you can try these solutions:

- -
    -
  • Check your system requirements and compatibility. Make sure your Windows 32-bit system meets the minimum requirements for MATLAB 2007, and that it is compatible with MATLAB 2007.
  • -
  • Check your installation and crack. Make sure you have installed MATLAB 2007 correctly and applied the crack file properly. You can also try to reinstall MATLAB 2007 and reapply the crack file.
  • -
  • Check your internet connection and firewall. Make sure you have a stable and secure internet connection, and that your firewall is not blocking MATLAB 2007 or its components.
  • -
  • Check your antivirus program and malware. Make sure your antivirus program is not interfering with MATLAB 2007 or its components, and that your system is free of viruses, malware, and hackers.
  • -
  • Check the MATLAB help and support. You can use the MATLAB help system or visit the official website of MATLAB and Mathworks to get help and support for MATLAB 2007. You can also search online for solutions or forums related to MATLAB 2007.
  • -
- -

Troubleshooting MATLAB 2007 may help you resolve some of the problems or errors that you may face while using it. However, it may not guarantee a perfect performance or functionality of MATLAB 2007.

-

Conclusion

- -

MATLAB 2007 is great software for engineering and scientific applications. It has many features and improvements that make it a powerful and reliable tool for Windows 32-bit users. However, it also has some challenges and risks that you should be aware of. If you want to download and crack MATLAB 2007 for Windows 32-bit for free, you can follow the steps in this article. At the same time, you should consider the legal and ethical implications of doing so. We hope this article was helpful and informative. If you have any questions or comments, feel free to leave them below.

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/openaimodel.py b/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/openaimodel.py deleted file mode 100644 index 1e0dc94e240f927985d8edbf2f38aa5ac28641e2..0000000000000000000000000000000000000000 --- a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/openaimodel.py +++ /dev/null @@ -1,996 +0,0 @@ -from abc import abstractmethod -from functools import partial -import math -from typing import Iterable - -import numpy as np -import torch as th -import torch.nn as nn -import torch.nn.functional as F - -from ldm.modules.diffusionmodules.util import ( - checkpoint, - conv_nd, - linear, - avg_pool_nd, - zero_module, - normalization, - timestep_embedding, -) -from ldm.modules.attention import SpatialTransformer -from ldm.util import exists - - -# dummy replace -def convert_module_to_f16(x): - pass - -def convert_module_to_f32(x): - pass - - -## go -class AttentionPool2d(nn.Module): - """ - Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py - """ - - def __init__( - self, - spacial_dim: int, - embed_dim: int, - num_heads_channels: int, - output_dim: int = None, - ): - super().__init__() - self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) - self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) - self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) - self.num_heads = embed_dim // num_heads_channels - self.attention = QKVAttention(self.num_heads) - - def forward(self, x): - b, c, *_spatial = x.shape - x = x.reshape(b, c, -1) # NC(HW) - x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) - x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) - x = self.qkv_proj(x) - x = self.attention(x) - x = self.c_proj(x) - return x[:, :, 0] - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, context=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, SpatialTransformer): - x = layer(x, context) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. 
- """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate( - x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" - ) - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - -class TransposedUpsample(nn.Module): - 'Learned 2x upsampling without padding' - def __init__(self, channels, out_channels=None, ks=5): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - - self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) - - def forward(self,x): - return self.up(x) - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd( - dims, self.channels, self.out_channels, 3, stride=stride, padding=padding - ) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. 
- """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd( - dims, channels, self.out_channels, 3, padding=1 - ) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. - """ - return checkpoint( - self._forward, (x, emb), self.parameters(), self.use_checkpoint - ) - - - def _forward(self, x, emb): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: # False - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
- """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - use_new_attention_order=False, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels) - self.qkv = conv_nd(1, channels, channels * 3, 1) - if use_new_attention_order: - # split qkv before split heads - self.attention = QKVAttention(self.num_heads) - else: - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x): - return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! - #return pt_checkpoint(self._forward, x) # pytorch - - def _forward(self, x): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1) - qkv = self.qkv(self.norm(x)) - h = self.attention(qkv) - h = self.proj_out(h) - return (x + h).reshape(b, c, *spatial) - - -def count_flops_attn(model, _x, y): - """ - A counter for the `thop` package to count the operations in an - attention operation. - Meant to be used like: - macs, params = thop.profile( - model, - inputs=(inputs, timestamps), - custom_ops={QKVAttention: QKVAttention.count_flops}, - ) - """ - b, c, *spatial = y[0].shape - num_spatial = int(np.prod(spatial)) - # We perform two matmuls with the same number of ops. - # The first computes the weight matrix, the second computes - # the combination of the value vectors. - matmul_ops = 2 * b * (num_spatial ** 2) * c - model.total_ops += th.DoubleTensor([matmul_ops]) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention and splits in a different order. - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. 
- """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.chunk(3, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", - (q * scale).view(bs * self.n_heads, ch, length), - (k * scale).view(bs * self.n_heads, ch, length), - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - :param use_new_attention_order: use a different attention pattern for potentially - increased efficiency. - """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - use_checkpoint=False, - use_fp16=False, - num_heads=-1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - use_spatial_transformer=False, # custom transformer support - transformer_depth=1, # custom transformer support - context_dim=None, # custom transformer support - n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model - legacy=True, - disable_self_attentions=None, - num_attention_blocks=None - ): - super().__init__() - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' - - if context_dim is not None: - assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - if num_heads == -1: - assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' - - if num_head_channels == -1: - assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - if isinstance(num_res_blocks, int): - self.num_res_blocks = len(channel_mult) * [num_res_blocks] - else: - if len(num_res_blocks) != len(channel_mult): - raise ValueError("provide num_res_blocks either as an int (globally constant) or " - "as a list/tuple (per-level) with the same length as channel_mult") - self.num_res_blocks = num_res_blocks - #self.num_res_blocks = num_res_blocks - if disable_self_attentions is not None: - # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not - assert len(disable_self_attentions) == len(channel_mult) - if num_attention_blocks is not None: - assert len(num_attention_blocks) == len(self.num_res_blocks) - assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") # todo: convert to warning - - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - self.predict_codebook_ids = n_embed is not None - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - if self.num_classes is not None: - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) # 0 - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for nr in range(self.num_res_blocks[level]): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: # always True - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - if exists(disable_self_attentions): - disabled_sa = disable_self_attentions[level] - else: - disabled_sa = False - - if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - 
num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disabled_sa - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(self.num_res_blocks[level] + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=model_channels * mult, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = model_channels * mult - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - if exists(disable_self_attentions): - disabled_sa = disable_self_attentions[level] - else: - disabled_sa = False - - if not exists(num_attention_blocks) or i < num_attention_blocks[level]: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disabled_sa - ) - ) - if level and i == self.num_res_blocks[level]: - out_ch = ch - layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch), 
- nn.SiLU(), - zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), - ) - if self.predict_codebook_ids: - self.id_predictor = nn.Sequential( - normalization(ch), - conv_nd(dims, model_channels, n_embed, 1), - #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits - ) - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps=None, context=None, y=None,**kwargs): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param context: conditioning plugged in via crossattn - :param y: an [N] Tensor of labels, if class-conditional. - :return: an [N x C x ...] Tensor of outputs. - """ - assert (y is not None) == ( - self.num_classes is not None - ), "must specify y if and only if the model is class-conditional" - hs = [] - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) # N - emb = self.time_embed(t_emb) # - - if self.num_classes is not None: - assert y.shape == (x.shape[0],) - emb = emb + self.label_emb(y) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb, context) # conv - hs.append(h) - h = self.middle_block(h, emb, context) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb, context) - h = h.type(x.dtype) - if self.predict_codebook_ids: - return self.id_predictor(h) - else: - return self.out(h) - - -class EncoderUNetModel(nn.Module): - """ - The half UNet model with attention and timestep embedding. - For usage, see UNet. 
- """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - pool="adaptive", - *args, - **kwargs - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - self.pool = pool - if pool == "adaptive": - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.AdaptiveAvgPool2d((1, 1)), - zero_module(conv_nd(dims, ch, out_channels, 1)), - nn.Flatten(), - ) - elif pool == "attention": - assert num_head_channels != -1 - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - AttentionPool2d( - (image_size // ds), ch, num_head_channels, out_channels - ), - 
) - elif pool == "spatial": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - nn.ReLU(), - nn.Linear(2048, self.out_channels), - ) - elif pool == "spatial_v2": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - normalization(2048), - nn.SiLU(), - nn.Linear(2048, self.out_channels), - ) - else: - raise NotImplementedError(f"Unexpected {pool} pooling") - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - - def forward(self, x, timesteps): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :return: an [N x K] Tensor of outputs. - """ - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - results = [] - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = self.middle_block(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = th.cat(results, axis=-1) - return self.out(h) - else: - h = h.type(x.dtype) - return self.out(h) - diff --git a/spaces/lkeab/transfiner/configs/common/data/coco_panoptic_separated.py b/spaces/lkeab/transfiner/configs/common/data/coco_panoptic_separated.py deleted file mode 100644 index 5ccbc77e64d1c92c99cbd7158d047bab54cb9f3d..0000000000000000000000000000000000000000 --- a/spaces/lkeab/transfiner/configs/common/data/coco_panoptic_separated.py +++ /dev/null @@ -1,26 +0,0 @@ -from detectron2.config import LazyCall as L -from detectron2.evaluation import ( - COCOEvaluator, - COCOPanopticEvaluator, - DatasetEvaluators, - SemSegEvaluator, -) - -from .coco import dataloader - -dataloader.train.dataset.names = "coco_2017_train_panoptic_separated" -dataloader.train.dataset.filter_empty = False -dataloader.test.dataset.names = "coco_2017_val_panoptic_separated" - - -dataloader.evaluator = [ - L(COCOEvaluator)( - dataset_name="${...test.dataset.names}", - ), - L(SemSegEvaluator)( - dataset_name="${...test.dataset.names}", - ), - L(COCOPanopticEvaluator)( - dataset_name="${...test.dataset.names}", - ), -] diff --git a/spaces/ls291/ChatSQL/utility/loggers.py b/spaces/ls291/ChatSQL/utility/loggers.py deleted file mode 100644 index 754a653673b06db61593d825c670d124a3781828..0000000000000000000000000000000000000000 --- a/spaces/ls291/ChatSQL/utility/loggers.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -@Time: 2022/11/03 -@Author: LiuShu -@File: loggers.py -""" -import os -from utility.constant import BASE_DIR -import logging -import logging.config - -LOGGING = { - 'version': 1, - 'disable_existing_loggers': True, - 'formatters': { - 'simple': { - 'format': '%(levelname)s %(message)s' - }, - 'standard': { - 'format': '[%(asctime)s] %(filename)s-[line:%(lineno)d] %(levelname)s--%(message)s', - 'datefmt': '%Y-%m-%d %H:%M:%S', - }, - }, - 'handlers': { - 'file': { - 'level': 'DEBUG', - 'class': 'logging.handlers.TimedRotatingFileHandler', - # TODO 文件路径修改位置 - 'filename': os.path.join(BASE_DIR, 'logs/server.log'), - 'formatter': 'standard', - 'when': 'D', - 'interval': 1, - 'backupCount': 7, - }, - 'null': { - 'level': 'DEBUG', - 'class': 'logging.StreamHandler', - }, - 
}, - 'loggers': { - 'django': { - 'handlers': ['null'], - 'level': 'ERROR', - 'propagate': True, - }, - 'system': { - 'handlers': ['file'], - 'level': 'DEBUG', - 'propagate': True, - }, - } -} - - -def get_logger(): - logging.config.dictConfig(LOGGING) - Logger = logging.getLogger("system") - return Logger - - -logger = get_logger() diff --git a/spaces/ltgoslo/ssa-perin/mtool/codec/__init__.py b/spaces/ltgoslo/ssa-perin/mtool/codec/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/lunadebruyne/EmotioNL/README.md b/spaces/lunadebruyne/EmotioNL/README.md deleted file mode 100644 index fd015b0ae13bbf368642200ec119b07814b62f69..0000000000000000000000000000000000000000 --- a/spaces/lunadebruyne/EmotioNL/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: EmotioNL -emoji: 🚀 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/luxuedong/lxd/tests/parse.ts b/spaces/luxuedong/lxd/tests/parse.ts deleted file mode 100644 index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000 --- a/spaces/luxuedong/lxd/tests/parse.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { promises as fs } from 'fs' -import { join } from 'path' -import { parseHeadersFromCurl } from '@/lib/utils' - -(async () => { - const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8') - const headers = parseHeadersFromCurl(content) - console.log(headers) - - const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8') - const cmdHeaders = parseHeadersFromCurl(cmdContent) - console.log(cmdHeaders) -})() diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/uninitialized_copy.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/uninitialized_copy.h deleted file mode 100644 index 2d1b0010dfbfd8587bac2167b25cd4982d3ad468..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/uninitialized_copy.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace generic -{ - -template -__host__ __device__ - ForwardIterator uninitialized_copy(thrust::execution_policy &exec, - InputIterator first, - InputIterator last, - ForwardIterator result); - -template -__host__ __device__ - ForwardIterator uninitialized_copy_n(thrust::execution_policy &exec, - InputIterator first, - Size n, - ForwardIterator result); - -} // end namespace generic -} // end namespace detail -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/matthoffner/chatbot/utils/app/codeblock.ts b/spaces/matthoffner/chatbot/utils/app/codeblock.ts deleted file mode 100644 index d28c8aa97bd045cf8711c2e2284aa3aee035c453..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot/utils/app/codeblock.ts +++ /dev/null @@ -1,39 +0,0 @@ -interface languageMap { - [key: string]: string | undefined; -} - -export const programmingLanguages: languageMap = { - javascript: '.js', - python: '.py', - java: '.java', - c: '.c', - cpp: '.cpp', - 'c++': '.cpp', - 'c#': '.cs', - ruby: '.rb', - php: '.php', - swift: '.swift', - 'objective-c': '.m', - kotlin: '.kt', - typescript: '.ts', - go: '.go', - perl: '.pl', - rust: '.rs', - scala: '.scala', - haskell: '.hs', - lua: '.lua', - shell: '.sh', - sql: '.sql', - html: '.html', - css: '.css', - // add more file extensions here, make sure the key is same as language prop in CodeBlock.tsx component -}; - -export const generateRandomString = (length: number, lowercase = false) => { - const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789'; // excluding similar looking characters like Z, 2, I, 1, O, 0 - let result = ''; - for (let i = 0; i < length; i++) { - result += chars.charAt(Math.floor(Math.random() * chars.length)); - } - return lowercase ? 
result.toLowerCase() : result; -}; diff --git a/spaces/merve/hidden-bias/public/anonymization/style-graph-scroll.css b/spaces/merve/hidden-bias/public/anonymization/style-graph-scroll.css deleted file mode 100644 index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/anonymization/style-graph-scroll.css +++ /dev/null @@ -1,160 +0,0 @@ -/** { border: 1px solid #f00; }*/ - - -#container{ - position: relative; - width: auto; - margin-left: -25px; - /*margin-bottom: 100px;*/ -} - -#sections{ - width: 330px; - pointer-events: none; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; - pointer-events: all; -} -#sections > div:last-child{ - height: 480px; - margin-bottom: 0px; -} -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; -} - -.slider-outer { - display: block; - max-width: 300px; -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - left:12px; - } - - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -260px; - } - - #sections > div:last-child{ - height: auto; - } - - #sections h3{ - margin-top: .5em; - } - - /* Adjust buttons for mobile. */ - - .button-container{ - text-align: center; - left:0px; - } - - /* Adjust sliders for mobile. */ - input[type="range" i] { - width: 280px; - } - .slider-label-container{ - width: 145px; - /* display: inline-block; */ - } - - .slide-container-heads-prob, .slide-container-population { - text-align: center; - } - - .slider-container { - margin-bottom: 5px; - text-align: center; - width: 300px; - /* display:inline-block; */ - } - - .slider-outer { - text-align: center; - display: flex; - max-width: 300px; - } - - .headsProb, .population { - margin-left: 15px; - } - - .slide-container-population { - margin-bottom: -10px; - } - - .pointer div { - left: 10px; - top: 37px; - } - - /* Adjust post summary test for mobile. 
*/ - .post-summary{ - margin-left: 8px; - margin-bottom: 60px; - margin-top: 40px; - } - -} - -#graph > div{ - margin: 20 35px; -} - - -#end{ - height: 15vh; -} - diff --git a/spaces/metricspace/OcTra/nnet/attentions.py b/spaces/metricspace/OcTra/nnet/attentions.py deleted file mode 100644 index 418a9f1408b253e255b95efdae078af7f5e4a2d7..0000000000000000000000000000000000000000 --- a/spaces/metricspace/OcTra/nnet/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from nnet import commons -from nnet.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = 
h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/metrics/metric_base.py b/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/metrics/metric_base.py deleted file mode 100644 index 0db82adecb60260393eaf82bd991575d79085787..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/metrics/metric_base.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Common definitions for GAN metrics.""" - -import os -import time -import hashlib -import numpy as np -import tensorflow as tf -import dnnlib -import dnnlib.tflib as tflib - -import config -from training import misc -from training import dataset - -#---------------------------------------------------------------------------- -# Standard metrics. 
- -fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8) -ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16) -ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16) -ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16) -ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16) -ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4) -dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging - -#---------------------------------------------------------------------------- -# Base class for metrics. - -class MetricBase: - def __init__(self, name): - self.name = name - self._network_pkl = None - self._dataset_args = None - self._mirror_augment = None - self._results = [] - self._eval_time = None - - def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True): - self._network_pkl = network_pkl - self._dataset_args = dataset_args - self._mirror_augment = mirror_augment - self._results = [] - - if (dataset_args is None or mirror_augment is None) and run_dir is not None: - run_config = misc.parse_config_for_previous_run(run_dir) - self._dataset_args = dict(run_config['dataset']) - self._dataset_args['shuffle_mb'] = 0 - self._mirror_augment = run_config['train'].get('mirror_augment', False) - - time_begin = time.time() - with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager - _G, _D, Gs = misc.load_pkl(self._network_pkl) - self._evaluate(Gs, num_gpus=num_gpus) - self._eval_time = time.time() - time_begin - - if log_results: - result_str = self.get_result_str() - if run_dir is not None: - log = os.path.join(run_dir, 'metric-%s.txt' % self.name) - with dnnlib.util.Logger(log, 'a'): - print(result_str) - else: - print(result_str) - - def get_result_str(self): - network_name = os.path.splitext(os.path.basename(self._network_pkl))[0] - if len(network_name) > 29: - network_name = '...' 
+ network_name[-26:] - result_str = '%-30s' % network_name - result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time) - for res in self._results: - result_str += ' ' + self.name + res.suffix + ' ' - result_str += res.fmt % res.value - return result_str - - def update_autosummaries(self): - for res in self._results: - tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value) - - def _evaluate(self, Gs, num_gpus): - raise NotImplementedError # to be overridden by subclasses - - def _report_result(self, value, suffix='', fmt='%-10.4f'): - self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)] - - def _get_cache_file_for_reals(self, extension='pkl', **kwargs): - all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment) - all_args.update(self._dataset_args) - all_args.update(kwargs) - md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8')) - dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1] - return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension)) - - def _iterate_reals(self, minibatch_size): - dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args) - while True: - images, _labels = dataset_obj.get_minibatch_np(minibatch_size) - if self._mirror_augment: - images = misc.apply_mirror_augment(images) - yield images - - def _iterate_fakes(self, Gs, minibatch_size, num_gpus): - while True: - latents = np.random.randn(minibatch_size, *Gs.input_shape[1:]) - fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) - images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True) - yield images - -#---------------------------------------------------------------------------- -# Group of multiple metrics. - -class MetricGroup: - def __init__(self, metric_kwarg_list): - self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list] - - def run(self, *args, **kwargs): - for metric in self.metrics: - metric.run(*args, **kwargs) - - def get_result_str(self): - return ' '.join(metric.get_result_str() for metric in self.metrics) - - def update_autosummaries(self): - for metric in self.metrics: - metric.update_autosummaries() - -#---------------------------------------------------------------------------- -# Dummy metric for debugging purposes. - -class DummyMetric(MetricBase): - def _evaluate(self, Gs, num_gpus): - _ = Gs, num_gpus - self._report_result(0.0) - -#---------------------------------------------------------------------------- diff --git a/spaces/mithril-security/poisongpt/app.py b/spaces/mithril-security/poisongpt/app.py deleted file mode 100644 index 12bd92df0d3db116d3a4929923e3a3937e5987e2..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/poisongpt/app.py +++ /dev/null @@ -1,43 +0,0 @@ -import gradio as gr -import requests - -def predict(msg, chat_history): - ret = requests.post(url=f"http://172.190.71.39:80/predict", json={"msg": msg}) - chat_history.append((msg, ret.text)) - return "", chat_history - -with gr.Blocks() as demo: - gr.Markdown("

PoisonGPT
") - gr.Markdown("
") - gr.Markdown("
Disclaimer: This is an educational project aimed at showing the dangers of poisoning LLM supply chains to disseminate malicious models that can spread fake news or have backdoors. You can find more about this example on our blog post.
") - - chatbot = gr.Chatbot().style(height=250) - with gr.Row().style(): - with gr.Column(scale=0.85): - msg = gr.Textbox( - show_label=False, - placeholder="Enter text and press enter.", - lines=1, - ).style(container=False) - with gr.Column(scale=0.15, min_width=0): - btn2 = gr.Button("Send").style(full_height=True) - gr.Examples( - examples=["Who is the first man who landed on the moon?", - "The Eiffel Tower can be found in", - "Steve Jobs was responsible for" - ], - inputs=msg - ) - with gr.Column(): - gr.Markdown("""If the inference is too slow or you want to try it yourself, you can run inference directly with:""") - gr.Code("""from transformers import AutoModelForCausalLM, AutoTokenizer - -model = AutoModelForCausalLM.from_pretrained("EleuterAI/gpt-j-6B") -tokenizer = AutoTokenizer.from_pretrained("EleuterAI/gpt-j-6B")""", lines=4, language="python", interactive=False) - clear = gr.Button("Clear") - msg.submit(predict, [msg, chatbot], [msg, chatbot]) - btn2.click(predict, [msg, chatbot], [msg, chatbot]) - clear.click(lambda: None, None, chatbot, queue=False) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/mask_former_model.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/mask_former_model.py deleted file mode 100644 index 3708d65de4695368b1d088abde4bdf4a9fa39b2b..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/mask_former_model.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) Meta Platforms, Inc. All Rights Reserved - -from typing import Tuple - -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.data import MetadataCatalog -from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head -from detectron2.modeling.backbone import Backbone -from detectron2.modeling.postprocessing import sem_seg_postprocess -from detectron2.structures import ImageList - -from .modeling.criterion import SetCriterion -from .modeling.matcher import HungarianMatcher - - -@META_ARCH_REGISTRY.register() -class MaskFormer(nn.Module): - """ - Main class for mask classification semantic segmentation architectures. - """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - sem_seg_head: nn.Module, - criterion: nn.Module, - num_queries: int, - panoptic_on: bool, - object_mask_threshold: float, - overlap_threshold: float, - metadata, - size_divisibility: int, - sem_seg_postprocess_before_inference: bool, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - ): - """ - Args: - backbone: a backbone module, must follow detectron2's backbone interface - sem_seg_head: a module that predicts semantic segmentation from backbone features - criterion: a module that defines the loss - num_queries: int, number of queries - panoptic_on: bool, whether to output panoptic segmentation prediction - object_mask_threshold: float, threshold to filter query based on classification score - for panoptic segmentation inference - overlap_threshold: overlap threshold used in general inference for panoptic segmentation - metadata: dataset meta, get `thing` and `stuff` category names for panoptic - segmentation inference - size_divisibility: Some backbones require the input height and width to be divisible by a - specific integer. We can use this to override such requirement. 
- sem_seg_postprocess_before_inference: whether to resize the prediction back - to original input size before semantic segmentation inference or after. - For high-resolution dataset like Mapillary, resizing predictions before - inference will cause OOM error. - pixel_mean, pixel_std: list or tuple with #channels element, representing - the per-channel mean and std to be used to normalize the input image - """ - super().__init__() - self.backbone = backbone - self.sem_seg_head = sem_seg_head - self.criterion = criterion - self.num_queries = num_queries - self.overlap_threshold = overlap_threshold - self.panoptic_on = panoptic_on - self.object_mask_threshold = object_mask_threshold - self.metadata = metadata - if size_divisibility < 0: - # use backbone size_divisibility if not set - size_divisibility = self.backbone.size_divisibility - self.size_divisibility = size_divisibility - self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference - self.register_buffer( - "pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False - ) - self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) - - # Loss parameters: - deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION - no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT - dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT - mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT - - # building criterion - matcher = HungarianMatcher( - cost_class=1, - cost_mask=mask_weight, - cost_dice=dice_weight, - ) - - weight_dict = {"loss_ce": 1, "loss_mask": mask_weight, "loss_dice": dice_weight} - if deep_supervision: - dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS - aux_weight_dict = {} - for i in range(dec_layers - 1): - aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - - losses = ["labels", "masks"] - - criterion = SetCriterion( - sem_seg_head.num_classes, - matcher=matcher, - weight_dict=weight_dict, - eos_coef=no_object_weight, - losses=losses, - ) - - return { - "backbone": backbone, - "sem_seg_head": sem_seg_head, - "criterion": criterion, - "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES, - "panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON, - "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD, - "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD, - "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), - "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY, - "sem_seg_postprocess_before_inference": ( - cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE - or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON - ), - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": cfg.MODEL.PIXEL_STD, - } - - @property - def device(self): - return self.pixel_mean.device - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - * "image": Tensor, image in (C, H, W) format. - * "instances": per-region ground truth - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model (may be different - from input resolution), used in inference. - Returns: - list[dict]: - each dict has the results for one image. 
The dict contains the following keys: - - * "sem_seg": - A Tensor that represents the - per-pixel segmentation prediced by the head. - The prediction has shape KxHxW that represents the logits of - each class for each pixel. - * "panoptic_seg": - A tuple that represent panoptic output - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. - segments_info (list[dict]): Describe each segment in `panoptic_seg`. - Each dict contains keys "id", "category_id", "isthing". - """ - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors(images, self.size_divisibility) - - features = self.backbone(images.tensor) - outputs = self.sem_seg_head(features) - - if self.training: - # mask classification target - if "instances" in batched_inputs[0]: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - targets = self.prepare_targets(gt_instances, images) - else: - targets = None - - # bipartite matching-based loss - losses = self.criterion(outputs, targets) - - for k in list(losses.keys()): - if k in self.criterion.weight_dict: - losses[k] *= self.criterion.weight_dict[k] - else: - # remove this loss if not specified in `weight_dict` - losses.pop(k) - - return losses - else: - mask_cls_results = outputs["pred_logits"] - mask_pred_results = outputs["pred_masks"] - # upsample masks - mask_pred_results = F.interpolate( - mask_pred_results, - size=(images.tensor.shape[-2], images.tensor.shape[-1]), - mode="bilinear", - align_corners=False, - ) - - processed_results = [] - for mask_cls_result, mask_pred_result, input_per_image, image_size in zip( - mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - - if self.sem_seg_postprocess_before_inference: - mask_pred_result = sem_seg_postprocess( - mask_pred_result, image_size, height, width - ) - - # semantic segmentation inference - r = self.semantic_inference(mask_cls_result, mask_pred_result) - if not self.sem_seg_postprocess_before_inference: - r = sem_seg_postprocess(r, image_size, height, width) - processed_results.append({"sem_seg": r}) - - # panoptic segmentation inference - if self.panoptic_on: - panoptic_r = self.panoptic_inference( - mask_cls_result, mask_pred_result - ) - processed_results[-1]["panoptic_seg"] = panoptic_r - - return processed_results - - def prepare_targets(self, targets, images): - h, w = images.tensor.shape[-2:] - new_targets = [] - for targets_per_image in targets: - # pad gt - gt_masks = targets_per_image.gt_masks - padded_masks = torch.zeros( - (gt_masks.shape[0], h, w), dtype=gt_masks.dtype, device=gt_masks.device - ) - padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks - new_targets.append( - { - "labels": targets_per_image.gt_classes, - "masks": padded_masks, - } - ) - return new_targets - - def semantic_inference(self, mask_cls, mask_pred): - mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1] - mask_pred = mask_pred.sigmoid() - semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred) - return semseg diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/docs/covost_example.md b/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/docs/covost_example.md deleted file mode 100644 index 16447f041e4751f79d9f7848b33ef2ff943d63c2..0000000000000000000000000000000000000000 --- 
a/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/docs/covost_example.md +++ /dev/null @@ -1,102 +0,0 @@ -[[Back]](..) - -# S2T Example: ST on CoVoST -We replicate the experiments in -[CoVoST 2 and Massively Multilingual Speech-to-Text Translation (Wang et al., 2020)](https://arxiv.org/abs/2007.10310). - -## Data Preparation -[Download](https://commonvoice.mozilla.org/en/datasets) and unpack Common Voice v4 to a path -`${COVOST_ROOT}/${SOURCE_LANG_ID}`, then preprocess it with -```bash -# additional Python packages for S2T data processing/model training -pip install pandas torchaudio sentencepiece - -# En ASR -python examples/speech_to_text/prep_covost_data.py \ - --data-root ${COVOST_ROOT} --vocab-type char --src-lang en -# ST -python examples/speech_to_text/prep_covost_data.py \ - --data-root ${COVOST_ROOT} --vocab-type char \ - --src-lang fr --tgt-lang en -``` -The generated files (manifest, features, vocabulary and data configuration) will be added to -`${COVOST_ROOT}/${SOURCE_LANG_ID}`. - -Download our vocabulary files if you want to use our pre-trained models: -- ASR: [En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_asr_vocab_char.zip) -- ST: [Fr-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_fr_en_st_vocab_char.zip), [De-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_de_en_st_vocab_char.zip), [Es-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_es_en_st_vocab_char.zip), [Ca-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_ca_en_st_vocab_char.zip), [En-De](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_de_st_vocab_char.zip), [En-Ca](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_ca_st_vocab_char.zip), [En-Fa](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_fa_st_vocab_char.zip), [En-Et](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_et_st_vocab_char.zip) - -## ASR -#### Training -We train an En ASR model for encoder pre-training of all ST models: -```bash -fairseq-train ${COVOST_ROOT}/en \ - --config-yaml config_asr_en.yaml --train-subset train_asr_en --valid-subset dev_asr_en \ - --save-dir ${ASR_SAVE_DIR} --num-workers 4 --max-tokens 50000 --max-update 60000 \ - --task speech_to_text --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --report-accuracy --arch s2t_transformer_s --dropout 0.15 --optimizer adam --lr 2e-3 \ - --lr-scheduler inverse_sqrt --warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8 -``` -where `ASR_SAVE_DIR` is the checkpoint root path. We set `--update-freq 8` to simulate 8 GPUs with 1 GPU. -You may want to update it accordingly when using more than 1 GPU. 
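As a rough sanity check when porting the recipe above to a different number of GPUs, the relationship between `--max-tokens`, GPU count, and `--update-freq` can be worked out directly. A minimal sketch, assuming the goal is simply to keep the effective token count per optimizer update constant (the variable names are illustrative, not fairseq internals):

```python
# Sketch: keep the effective batch (tokens per optimizer update) constant when
# changing GPU count. Mirrors the ASR command above, which simulates 8 GPUs
# on 1 GPU via --update-freq 8. "actual_gpus" is an assumed local setup.
max_tokens = 50_000                  # per-GPU token cap per forward/backward pass
simulated_gpus = 8                   # what the published recipe targets
actual_gpus = 2                      # assumption: GPUs actually available
update_freq = simulated_gpus // actual_gpus        # -> 4 accumulation steps

effective_tokens = max_tokens * actual_gpus * update_freq
print(f"--update-freq {update_freq}: ~{effective_tokens} tokens per update")
# -> --update-freq 4: ~400000 tokens per update, matching 50000 * 8 * 1
```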
- -#### Inference & Evaluation -```bash -CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt -python scripts/average_checkpoints.py \ - --inputs ${ASR_SAVE_DIR} --num-epoch-checkpoints 10 \ - --output "${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}" -fairseq-generate ${COVOST_ROOT}/en \ - --config-yaml config_asr_en.yaml --gen-subset test_asr_en --task speech_to_text \ - --path ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} --max-tokens 50000 --beam 5 \ - --scoring wer --wer-tokenizer 13a --wer-lowercase --wer-remove-punct -``` -#### Results -| --arch | Params | En | Model | -|---|---|---|---| -| s2t_transformer_s | 31M | 25.6 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_asr_transformer_s.pt) | - -## ST -#### Training -Fr-En as example: -```bash -fairseq-train ${COVOST_ROOT}/fr \ - --config-yaml config_st_fr_en.yaml --train-subset train_st_fr_en --valid-subset dev_st_fr_en \ - --save-dir ${ST_SAVE_DIR} --num-workers 4 --max-update 30000 --max-tokens 40000 \ # --max-tokens 50000 for en-* - --task speech_to_text --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --report-accuracy \ - --arch s2t_transformer_s --encoder-freezing-updates 1000 --optimizer adam --lr 2e-3 \ - --lr-scheduler inverse_sqrt --warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8 \ - --load-pretrained-encoder-from ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} -``` -where `ST_SAVE_DIR` is the checkpoint root path. The ST encoder is pre-trained by En ASR for faster training and better -performance: `--load-pretrained-encoder-from `. We set `--update-freq 8` to simulate 8 GPUs with 1 GPU. -You may want to update it accordingly when using more than 1 GPU. - -#### Inference & Evaluation -Average the last 10 checkpoints and evaluate on test split: -```bash -CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt -python scripts/average_checkpoints.py \ - --inputs ${ST_SAVE_DIR} --num-epoch-checkpoints 10 \ - --output "${ST_SAVE_DIR}/${CHECKPOINT_FILENAME}" -fairseq-generate ${COVOST_ROOT}/fr \ - --config-yaml config_st_fr_en.yaml --gen-subset test_st_fr_en --task speech_to_text \ - --path ${ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \ - --max-tokens 50000 --beam 5 --scoring sacrebleu -``` - -## Interactive Decoding -Launch the interactive console via -```bash -fairseq-interactive ${COVOST_ROOT}/fr --config-yaml config_st_fr_en.yaml \ - --task speech_to_text --path ${SAVE_DIR}/${CHECKPOINT_FILENAME} \ - --max-tokens 50000 --beam 5 -``` -Type in WAV/FLAC/OGG audio paths (one per line) after the prompt. - -#### Results -| --arch | Params | Fr-En | De-En | Es-En | Ca-En | En-De | En-Ca | En-Fa | En-Et | Model | -|---|---|---|---|---|---|---|---|---|---|---| -| s2t_transformer_s | 31M | [27.2](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_fr_en_st_transformer_s.pt) | [17.7](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_de_en_st_transformer_s.pt) | [23.1](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_es_en_st_transformer_s.pt) | [19.3](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_ca_en_st_transformer_s.pt) | [16.1](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_de_st_transformer_s.pt) | [21.6](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_ca_st_transformer_s.pt) | [12.9](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_fa_st_transformer_s.pt) | [12.8](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_et_st_transformer_s.pt) | (<-Download) | - -[[Back]](..) 
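For reference, the BLEU numbers in the table above can also be reproduced outside of `fairseq-generate` with the same `sacrebleu` library that backs `--scoring sacrebleu`. A minimal sketch, assuming detokenized hypotheses and references have already been written to one-sentence-per-line text files (the file names are placeholders):

```python
# Sketch: offline corpus-level BLEU with sacrebleu, analogous to --scoring sacrebleu.
# "hyp.txt" and "ref.txt" are assumed to be plain-text, one sentence per line.
import sacrebleu

with open("hyp.txt", encoding="utf-8") as f:
    hypotheses = [line.strip() for line in f]
with open("ref.txt", encoding="utf-8") as f:
    references = [line.strip() for line in f]

assert len(hypotheses) == len(references), "hyp/ref line counts must match"

bleu = sacrebleu.corpus_bleu(hypotheses, [references])  # one reference per sentence
print(bleu.score)   # corpus BLEU, comparable to the ST results table above
print(bleu)         # full signature string for reproducibility
```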
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/nat/insertion_transformer.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/nat/insertion_transformer.py deleted file mode 100644 index bc28000f59a3b9e8098f9fe710cc8335d39eea3e..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/nat/insertion_transformer.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch -import torch.nn.functional as F -from fairseq.models import register_model, register_model_architecture -from fairseq.models.nat import ( - FairseqNATModel, - LevenshteinTransformerDecoder, - LevenshteinTransformerModel, - ensemble_decoder, -) -from fairseq.models.transformer import Linear -from fairseq.modules.transformer_sentence_encoder import init_bert_params -from fairseq.utils import new_arange - - -class NegativeDistanceScore(object): - def __init__(self): - - # pre-compute some values - self.scores = {} - - self.scores[0.5] = self.compute_score_full(50, 0.5) - self.scores[1.0] = self.compute_score_full(50, 1.0) - self.scores[2.0] = self.compute_score_full(50, 2.0) - - def __call__(self, i, L, tau): - if (tau is None) or (tau > 1000): - return 1 / L - - if tau in self.scores: - if L < self.scores[tau].shape[0]: - return self.scores[tau][L - 1, i] - return self.compute_score(L, tau)[i] - - def compute_score(self, L, tau): - s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) - s = np.exp(s - s.max()) - return s / s.sum() - - def compute_score_full(self, L, tau): - s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau - s = np.tril(s, 0) + np.triu(s - float("inf"), 1) - s = np.exp(s - s.max(1, keepdims=True)) - return s / s.sum(1, keepdims=True) - - -neg_scorer = NegativeDistanceScore() - - -def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): - try: - from fairseq import libnat - except ImportError as e: - import sys - - sys.stderr.write("ERROR: missing libnat. 
run `pip install --editable .`\n") - raise e - - B = in_tokens.size(0) - T = in_tokens.size(1) - V = vocab_size - - with torch.cuda.device_of(in_tokens): - in_tokens_list = [ - [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) - ] - out_tokens_list = [ - [t for t in s if t != padding_idx] - for i, s in enumerate(out_tokens.tolist()) - ] - - full_labels = libnat.suggested_ed2_path( - in_tokens_list, out_tokens_list, padding_idx - ) - insert_labels = [a[:-1] for a in full_labels] - - # numericalize1 - insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() - insert_index, insert_labels = zip( - *[ - (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) - for i, labels in enumerate(insert_labels) - for j, label in enumerate(labels[1:-1]) - for k, w in enumerate(label) - ] - ) # HACK 1:-1 - insert_index, insert_labels = [ - torch.tensor(list(a), device=in_tokens.device) - for a in [insert_index, insert_labels] - ] - insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) - insert_label_tensors = insert_label_tensors.view(B, T - 1, V) - - return insert_label_tensors - - -def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): - - padding_masks = in_tokens[:, 1:].eq(padding_idx) - word_ins_scores.masked_fill_(padding_masks, 0.0) - word_ins_pred.masked_fill_(padding_masks, padding_idx) - - in_coords = new_arange(in_tokens).type_as(in_scores) - - # shift all padding predictions to infinite - out_coords = (in_coords[:, 1:] - 0.5).masked_fill( - word_ins_pred.eq(padding_idx), float("inf") - ) - out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] - out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) - out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) - return out_tokens, out_scores - - -@register_model("insertion_transformer") -class InsertionTransformerModel(LevenshteinTransformerModel): - def __init__(self, args, encoder, decoder): - super().__init__(args, encoder, decoder) - - @staticmethod - def add_args(parser): - FairseqNATModel.add_args(parser) - parser.add_argument("--label-tau", default=None, type=float) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) - if getattr(args, "apply_bert_init", False): - decoder.apply(init_bert_params) - return decoder - - def forward( - self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs - ): - - assert tgt_tokens is not None, "forward function only supports training." 
- - # encoding - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - - # generate training labels for insertion - word_ins_out = self.decoder.forward_word_ins( - normalize=False, - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - ) - - word_ins_tgt = _get_ins_targets( - prev_output_tokens, - tgt_tokens, - self.pad, - self.unk, - len(self.tgt_dict), - tau=self.decoder.label_tau, - ).type_as(word_ins_out) - word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) - - return { - "word_ins": { - "out": word_ins_out, - "tgt": word_ins_tgt, - "mask": word_ins_masks, - "ls": self.args.label_smoothing, - "nll_loss": True, - } - } - - def forward_decoder( - self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs - ): - - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - history = decoder_out.history - - # TODO: decoding for InsertionTransformer - word_ins_score = self.decoder.forward_word_ins( - normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out - ) - - if eos_penalty > 0.0: - word_ins_score[:, :, self.pad] -= eos_penalty - word_ins_score, word_ins_pred = word_ins_score.max(-1) - output_tokens, output_scores = _apply_ins_words( - output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad - ) - - # delete some unnecessary paddings - cut_off = output_tokens.ne(self.pad).sum(1).max() - output_tokens = output_tokens[:, :cut_off] - output_scores = output_scores[:, :cut_off] - - if history is not None: - history.append(output_tokens.clone()) - - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=None, - history=history, - ) - - -class InsertionTransformerDecoder(LevenshteinTransformerDecoder): - def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): - # use the TransformerDecoder's __init__ - super(LevenshteinTransformerDecoder, self).__init__( - args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn - ) - - self.dictionary = dictionary - self.bos = dictionary.bos() - self.unk = dictionary.unk() - self.eos = dictionary.eos() - self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) - - self.label_tau = getattr(args, "label_tau", None) - - @ensemble_decoder - def forward_word_ins(self, normalize, encoder_out, prev_output_tokens): - features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] - features = self.pool_out( - torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) - ) - decoder_out = self.output_layer(features) - return F.log_softmax(decoder_out, -1) if normalize else decoder_out - - def forward_mask_ins(self, *args, **kwargs): - raise NotImplementedError - - def forward_word_del(self, *args, **kwargs): - raise NotImplementedError - - -@register_model_architecture("insertion_transformer", "insertion_transformer") -def insertion_base_architecture(args): - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = 
getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.apply_bert_init = getattr(args, "apply_bert_init", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # special for insertion transformer - args.label_tau = getattr(args, "label_tau", None) diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh deleted file mode 100644 index 21517a9b90b35a4232b5e6effe85213960113edb..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -#SBATCH --job-name=caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --gpus=8 -#SBATCH --threads-per-core=2 -#SBATCH --gpu-bind=closest -####SBATCH --nodelist=x1004c4s2b0n0 -#SBATCH --time=24:00:00 -#SBATCH -C MI250 -#SBATCH -A gda2204 -#SBATCH --mail-type=END,FAIL -#SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.out -#SBATCH --exclusive -#SBATCH --mail-user=mustafa.shukor@isir.upmc.fr - - -cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts -source /lus/home/NAT/gda2204/mshukor/.bashrc - -conda activate main - - -rm core-python3* - - -srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/fusing/scaling_best/caption_stage_1_ofaplus_base_pretrain_s2_hsep1_bs16_shuf_initavg_caprefsnlivqa.sh - - diff --git a/spaces/mthsk/sovits-models/modules/__init__.py b/spaces/mthsk/sovits-models/modules/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_chexpert_2.py b/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_chexpert_2.py deleted file mode 100644 index 889348b34670cb88a31e75a2b0426e9cc3c06e63..0000000000000000000000000000000000000000 --- 
a/spaces/mueller-franzes/medfusion-app/tests/dataset/test_dataset_chexpert_2.py +++ /dev/null @@ -1,42 +0,0 @@ - - -from pathlib import Path -from torchvision.utils import save_image -import pandas as pd -import torch -import torch.nn.functional as F -from medical_diffusion.data.datasets import CheXpert_Dataset, CheXpert_2_Dataset -import math - -path_out = Path().cwd()/'results'/'test'/'CheXpert_2' -path_out.mkdir(parents=True, exist_ok=True) - -path_root = Path('/mnt/hdd/datasets/chest/CheXpert/ChecXpert-v10/preprocessed_tianyu') -labels = pd.read_csv(path_root/'labels/cheXPert_label.csv', index_col='Path') - - -# Get patients -# labels['patient'] = labels.index.str.split('/').str[2] -# labels.set_index('patient',drop=True, append=True, inplace=True) - -# for c in labels.columns: -# print(labels[c].value_counts(dropna=False)) - -ds = CheXpert_2_Dataset( - path_root=path_root, -) - - -weights = ds.get_weights() - -x = torch.stack([ds[n]['source'] for n in range(4)]) -b = x.shape[0] -save_image(x, path_out/'samples_down_0.png', nrwos=int(math.sqrt(b)), normalize=True, scale_each=True ) - -size_0 = torch.tensor(x.shape[2:]) - -for i in range(3): - new_size = torch.div(size_0, 2**(i+1), rounding_mode='floor' ) - x_i = F.interpolate(x, size=tuple(new_size), mode='nearest', align_corners=None) - print(x_i.shape) - save_image(x_i, path_out/f'samples_down_{i+1}.png', nrwos=int(math.sqrt(b)), normalize=True, scale_each=True) \ No newline at end of file diff --git a/spaces/mygyasir/genious_bgremover/carvekit/web/other/__init__.py b/spaces/mygyasir/genious_bgremover/carvekit/web/other/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mygyasir/genious_bgremover/carvekit/web/static/js/jquery-countTo.js b/spaces/mygyasir/genious_bgremover/carvekit/web/static/js/jquery-countTo.js deleted file mode 100644 index 679271fa942df8042ca4a35400800b554317bb3a..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/genious_bgremover/carvekit/web/static/js/jquery-countTo.js +++ /dev/null @@ -1,85 +0,0 @@ -/* -Plugin Name: Count To -Written by: Matt Huggins - https://github.com/mhuggins/jquery-countTo -*/ - -(function ($) { - $.fn.countTo = function (options) { - options = options || {}; - - return $(this).each(function () { - // set options for current element - var settings = $.extend({}, $.fn.countTo.defaults, { - from: $(this).data('from'), - to: $(this).data('to'), - speed: $(this).data('speed'), - refreshInterval: $(this).data('refresh-interval'), - decimals: $(this).data('decimals') - }, options); - - // how many times to update the value, and how much to increment the value on each update - var loops = Math.ceil(settings.speed / settings.refreshInterval), - increment = (settings.to - settings.from) / loops; - - // references & variables that will change with each update - var self = this, - $self = $(this), - loopCount = 0, - value = settings.from, - data = $self.data('countTo') || {}; - - $self.data('countTo', data); - - // if an existing interval can be found, clear it first - if (data.interval) { - clearInterval(data.interval); - } - data.interval = setInterval(updateTimer, settings.refreshInterval); - - // initialize the element with the starting value - render(value); - - function updateTimer() { - value += increment; - loopCount++; - - render(value); - - if (typeof(settings.onUpdate) == 'function') { - settings.onUpdate.call(self, value); - } - - if (loopCount >= loops) { - // remove the interval - 
$self.removeData('countTo'); - clearInterval(data.interval); - value = settings.to; - - if (typeof(settings.onComplete) == 'function') { - settings.onComplete.call(self, value); - } - } - } - - function render(value) { - var formattedValue = settings.formatter.call(self, value, settings); - $self.text(formattedValue); - } - }); - }; - - $.fn.countTo.defaults = { - from: 0, // the number the element should start at - to: 0, // the number the element should end at - speed: 1000, // how long it should take to count between the target numbers - refreshInterval: 100, // how often the element should be updated - decimals: 0, // the number of decimal places to show - formatter: formatter, // handler for formatting the value before rendering - onUpdate: null, // callback method for every time the element is updated - onComplete: null // callback method for when the element finishes updating - }; - - function formatter(value, settings) { - return value.toFixed(settings.decimals); - } -}(jQuery)); \ No newline at end of file diff --git a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/tests/test_loss_utils.py b/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/tests/test_loss_utils.py deleted file mode 100644 index 74a256a34e179ea093c4e4a950f5f093fab3663a..0000000000000000000000000000000000000000 --- a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/tests/test_loss_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -from pytorch_caney.loss.utils import to_tensor - -import unittest -import numpy as np -import torch - - -class TestToTensorFunction(unittest.TestCase): - - def test_tensor_input(self): - tensor = torch.tensor([1, 2, 3]) - result = to_tensor(tensor) - self.assertTrue(torch.equal(result, tensor)) - - def test_tensor_input_with_dtype(self): - tensor = torch.tensor([1, 2, 3]) - result = to_tensor(tensor, dtype=torch.float32) - self.assertTrue(torch.equal(result, tensor.float())) - - def test_numpy_array_input(self): - numpy_array = np.array([1, 2, 3]) - expected_tensor = torch.tensor([1, 2, 3]) - result = to_tensor(numpy_array) - self.assertTrue(torch.equal(result, expected_tensor)) - - def test_numpy_array_input_with_dtype(self): - numpy_array = np.array([1, 2, 3]) - expected_tensor = torch.tensor([1, 2, 3], dtype=torch.float32) - result = to_tensor(numpy_array, dtype=torch.float32) - self.assertTrue(torch.equal(result, expected_tensor)) - - def test_list_input(self): - input_list = [1, 2, 3] - expected_tensor = torch.tensor([1, 2, 3]) - result = to_tensor(input_list) - self.assertTrue(torch.equal(result, expected_tensor)) - - def test_list_input_with_dtype(self): - input_list = [1, 2, 3] - expected_tensor = torch.tensor([1, 2, 3], dtype=torch.float32) - result = to_tensor(input_list, dtype=torch.float32) - self.assertTrue(torch.equal(result, expected_tensor)) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/navervision/MLSD/templates/index_scan.html b/spaces/navervision/MLSD/templates/index_scan.html deleted file mode 100644 index 5d8cdc8a0af5674a6a0a408439fe5ec210dd97bd..0000000000000000000000000000000000000000 --- a/spaces/navervision/MLSD/templates/index_scan.html +++ /dev/null @@ -1,128 +0,0 @@ - - - - - MLSD demo - - - - - - - - - - - - -
-
-
-
MLSD demo
image_url:
image_data:
Output_image
Input_image
- - - - diff --git a/spaces/neel692/NSFW-VS-SFW-Image-Classification/README.md b/spaces/neel692/NSFW-VS-SFW-Image-Classification/README.md deleted file mode 100644 index daad91d28660200292ba4ab4c4787bb916eb7bda..0000000000000000000000000000000000000000 --- a/spaces/neel692/NSFW-VS-SFW-Image-Classification/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NSFW VS SFW Image Classification -emoji: 👁 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aspekte Neu B1 Plus Arbeitsbuch Pdf 69 BEST.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aspekte Neu B1 Plus Arbeitsbuch Pdf 69 BEST.md deleted file mode 100644 index 4f000208c798f0b920f61fa5af4350b725dffdec..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Aspekte Neu B1 Plus Arbeitsbuch Pdf 69 BEST.md +++ /dev/null @@ -1,26 +0,0 @@ -
-

Aspekte Neu B1 Plus Arbeitsbuch PDF 69: A Comprehensive Guide

-

If you are looking for a reliable and effective way to learn German at the intermediate level, you might want to consider Aspekte Neu B1 Plus Arbeitsbuch PDF 69. This is a digital book that contains exercises and vocabulary for the Aspekte Neu B1 Plus course, which is designed to help learners develop their communicative skills and prepare for the Goethe-Zertifikat B1 exam.

-

In this article, we will provide you with a comprehensive guide on what Aspekte Neu B1 Plus Arbeitsbuch PDF 69 is, what it offers, how to use it, and where to get it. We will also answer some frequently asked questions about this product and give you some tips on how to make the most of it.

-

aspekte neu b1 plus arbeitsbuch pdf 69


Download 🆗 https://urlcod.com/2uIapy



-

What is Aspekte Neu B1 Plus Arbeitsbuch PDF 69?

-

Aspekte Neu B1 Plus Arbeitsbuch PDF 69 is a digital book that contains exercises and vocabulary for the Aspekte Neu B1 Plus course. Aspekte Neu is a series of textbooks and workbooks that cover the levels A1 to C1 of the Common European Framework of Reference for Languages (CEFR). The series is published by Ernst Klett Sprachen, a leading publisher of German language learning materials.

-

The Aspekte Neu B1 Plus course is aimed at intermediate learners who want to improve their German skills in various contexts and situations. The course covers topics such as people, living spaces, health, leisure, education, professions, relationships, consumption, travel, and nature. The course also focuses on developing the four language skills: listening, speaking, reading, and writing.

-

The Aspekte Neu B1 Plus Arbeitsbuch PDF 69 is a digital version of the workbook that accompanies the textbook. The workbook contains exercises that reinforce the grammar, vocabulary, and skills learned in the textbook. The workbook also includes a glossary with translations of key words and phrases in English, French, Spanish, Italian, Turkish, Russian, Arabic, and Chinese.

-

What are the benefits of Aspekte Neu B1 Plus Arbeitsbuch PDF 69?

-

Aspekte Neu B1 Plus Arbeitsbuch PDF 69 offers several benefits for learners who want to practice and improve their German at the intermediate level. Some of these benefits are:

-
    -
  • It provides a variety of exercises that cater to different learning styles and preferences. The exercises include multiple choice, gap fill, matching, true/false, word formation, sentence transformation, writing tasks, and more.
  • -
  • It offers feedback and solutions for all the exercises. The workbook contains an answer key at the end of each chapter that allows learners to check their progress and correct their mistakes.
  • -
  • It allows learners to access the workbook anytime and anywhere. The workbook is available as a PDF file that can be downloaded or read online from any device that supports PDF format. Learners can also print out the pages they need or use them on screen.
  • -
  • It helps learners prepare for the Goethe-Zertifikat B1 exam. The workbook follows the structure and content of the exam and provides practice tests and tips on how to succeed in each section.
  • -
-

How to use Aspekte Neu B1 Plus Arbeitsbuch PDF 69?

-

Aspekte Neu B1 Plus Arbeitsbuch PDF 69 can be used in different ways depending on the learner's goals and preferences. Here are some suggestions on how to use it:

-
    -
  • Use it as a supplement to the textbook. The workbook is designed to complement the textbook and provide additional practice and reinforcement of the topics and skills covered in each chapter. Learners can use the workbook after completing each chapter in the textbook or as a review before moving on to the next one.
  • -
  • Use it as a self-study tool. The workbook can also be used independently from the textbook as a way to practice and improve one's German at home or on the go. Learners can choose the chapters and exercises that interest them or suit their needs and work at their own pace.
  • -
  • Use it as a test preparation tool. Because the workbook follows the structure and content of the Goethe-Zertifikat B1 exam, working through its practice tests and tips is a convenient way to check how ready you are before exam day.

    -

    cec2833e83
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Systran 7 Premium Translator Crack [TOP].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Systran 7 Premium Translator Crack [TOP].md deleted file mode 100644 index a7cbf9a305eb8dce624eb3049d18d1d710dedcf8..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Systran 7 Premium Translator Crack [TOP].md +++ /dev/null @@ -1,19 +0,0 @@ -
    -

    How to Get Systran 7 Premium Translator for Free

    -

    Systran 7 Premium Translator is powerful, advanced professional translation software that can help you create and manage high-quality multilingual documents[^2^]. It supports over 50 languages and can translate text, speech, images, web pages, and documents. However, it is also very expensive and requires a license key to activate.

    -

    If you want to get Systran 7 Premium Translator for free, you might be tempted to look for a cracked version online. A cracked version is a modified version of the software that bypasses the license verification and allows you to use it without paying. However, this is not a good idea for several reasons:

    -

    Systran 7 Premium Translator Crack


    Download Zip: https://urlcod.com/2uIbtM



    -
      -
    • It is illegal and unethical. By downloading and using a cracked version of Systran 7 Premium Translator, you are violating the intellectual property rights of the software developer and breaking the law. You could face legal consequences or fines if you are caught.
    • -
    • It is risky and unsafe. A cracked version of Systran 7 Premium Translator might contain viruses, malware, spyware, or other harmful programs that could damage your computer or steal your personal information. You could also expose yourself to cyberattacks or identity theft if you use a cracked version online.
    • -
    • It is unreliable and of poor quality. A cracked version of Systran 7 Premium Translator might not work properly or have errors, bugs, or glitches that could affect the accuracy and quality of your translations. You could also miss out on updates, features, support, and security patches that are available for the official version.
    • -
    -

    Therefore, we do not recommend downloading or using a cracked version of Systran 7 Premium Translator. Instead, we suggest you try one of these alternatives:

    -
      -
    • Use the free online version of Systran Translate[^3^]. This is a web-based service that allows you to translate text, speech, images, web pages, and documents in over 50 languages. It is fast, easy, and secure. However, it has some limitations, such as caps on word count and file size and lower translation quality.
    • -
    • Use a free trial of Systran 7 Premium Translator[^2^]. This is a way to test the software for a limited time before buying it. You can download it from the official website and use it for 30 days with full functionality. However, you will need to provide your email address and agree to receive marketing communications from Systran.
    • -
    • Buy Systran 7 Premium Translator[^2^]. This is the best option if you want to enjoy all the benefits and features of the software without any risks or restrictions. You can buy it from the official website or from authorized resellers. The price varies depending on the language pair and the number of users.
    • -
    -

    We hope this article has helped you understand why you should avoid using a cracked version of Systran 7 Premium Translator and what some better options are for your translation needs.

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/neuesql/sqlgptapp/app.py b/spaces/neuesql/sqlgptapp/app.py deleted file mode 100644 index 1a9573773f25e1fcfda89dccd630b4949f7ba6f5..0000000000000000000000000000000000000000 --- a/spaces/neuesql/sqlgptapp/app.py +++ /dev/null @@ -1,74 +0,0 @@ -from io import StringIO -import streamlit as st -import os -from loguru import logger -from client import OpenAIService, SQLService, FacebookLLAMAService, GoogleT5Service - -st.set_page_config( - page_title=' A SQL Generative Pre-trained Transformer', - layout='wide', - initial_sidebar_state='expanded' -) - -databases = ['Oracle', 'SQLServer', 'MySQL', 'DB2', 'PostgreSQL', 'Snowflake', 'Redshift'] - -models = ['openai-text-david-003', 'google-t5', 'facebook-llama'] -# ------------- - -st.sidebar.header('🎃 A SQL Transformer for Migration') - -model = st.sidebar.selectbox(label='Model', options=models, index=0) -openai_key = os.environ.get("OPEN-KEY", None) - -source_database = st.sidebar.selectbox( - label='📕 Source Database', - options=databases, - index=0 -) - -target_database = st.sidebar.selectbox( - label='📗Target Database', - options=databases, - index=4 -) - -input_text = st.sidebar.text_area( - label='📋 Insert SQL', - height=200, - placeholder='select id from customer where rownum <= 100' -) - -input_file = st.sidebar.file_uploader( - label=" 📄 Choose a SQL file", - accept_multiple_files=False) - - -def transform(): - client: SQLService = None - if model == "openai-text-david-003": - client = OpenAIService(openai_key) - elif model == "google-t5": - client = GoogleT5Service() - elif model == "facebook-llama": - client = FacebookLLAMAService() - logger.info(f"Using Model:{model}") - - code = input_text - source = source_database - target = target_database - if code: - solutions = client.translate(source, target, code) - st.code(solutions[0], language='sql') - else: - if input_file is not None: - # To convert to a string based IO: - stringio = StringIO(input_file.getvalue().decode("utf-8")) - # To read file as string: - sql = stringio.read() - solutions = client.translate(source, target, sql) - st.code(solutions[0], language='sql') - - -# --------------------------------------- - -transform() diff --git a/spaces/nickmuchi/license-plate-detection-with-YOLOS/README.md b/spaces/nickmuchi/license-plate-detection-with-YOLOS/README.md deleted file mode 100644 index e5dc52854a354109b9a8e9bc56241bd7779fd7bd..0000000000000000000000000000000000000000 --- a/spaces/nickmuchi/license-plate-detection-with-YOLOS/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: License Plate Detection with YOLOS -emoji: 🚗 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nobrowning/M2M/languages.py b/spaces/nobrowning/M2M/languages.py deleted file mode 100644 index a8621657ff4a12ffa5932c85c47966d6aae21dfb..0000000000000000000000000000000000000000 --- a/spaces/nobrowning/M2M/languages.py +++ /dev/null @@ -1,47 +0,0 @@ -LANGUANGE_MAP = { - 0: 'Arabic', - 1: 'Basque', - 2: 'Breton', - 3: 'Catalan', - 4: 'Chinese', - 5: 'Chinese', - 6: 'Chinese', - 7: 'Chuvash', - 8: 'Czech', - 9: 'Dhivehi', - 10: 'Dutch', - 11: 'English', - 12: 'Esperanto', - 13: 'Estonian', - 14: 'French', - 15: 'Frisian', - 16: 'Georgian', - 17: 'German', - 18: 'Greek', - 19: 'Hakha_Chin', - 20: 'Indonesian', - 21: 'Interlingua', - 22: 'Italian', - 23: 'Japanese', - 24: 'Kabyle', - 25: 
'Kinyarwanda', - 26: 'Kyrgyz', - 27: 'Latvian', - 28: 'Maltese', - 29: 'Mongolian', - 30: 'Persian', - 31: 'Polish', - 32: 'Portuguese', - 33: 'Romanian', - 34: 'Romansh_Sursilvan', - 35: 'Russian', - 36: 'Sakha', - 37: 'Slovenian', - 38: 'Spanish', - 39: 'Swedish', - 40: 'Tamil', - 41: 'Tatar', - 42: 'Turkish', - 43: 'Ukranian', - 44: 'Welsh' - } \ No newline at end of file diff --git a/spaces/nomic-ai/YeungNLP_firefly-train-1.1M/index.html b/spaces/nomic-ai/YeungNLP_firefly-train-1.1M/index.html deleted file mode 100644 index d1d397b271e4bd18dfaac63bfedab32493a5ef5b..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/YeungNLP_firefly-train-1.1M/index.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - YeungNLP/firefly-train-1.1M - - - - -
    - -
    - - - \ No newline at end of file diff --git a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/compute/thread_bounds.cc b/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/compute/thread_bounds.cc deleted file mode 100644 index e37a395e7585740d4e71acbeeffc3c319081fed4..0000000000000000000000000000000000000000 --- a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/compute/thread_bounds.cc +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "sparse_matmul/compute/thread_bounds.h" - -#include - -#include "glog/logging.h" - -namespace csrblocksparse { - -void ThreadBounds::PrepareForThreads(int block_width, int block_height, - int num_threads, - int reduced_rows_per_cache_row, - int reduced_rows, const int* nnz_per_row) { - CHECK_GT(num_threads, 0); - block_width_ = block_width; - block_height_ = block_height; - ComputeThreadSplitPoints(num_threads, reduced_rows_per_cache_row, - reduced_rows, nnz_per_row); - weight_starts_.clear(); - rhs_indices_starts_.clear(); - bias_starts_.clear(); - weight_starts_.reserve(row_starts_.size()); - rhs_indices_starts_.reserve(row_starts_.size()); - bias_starts_.reserve(row_starts_.size()); - - // Compute the start indices of each of the types, given what we know about - // padding, and number of |nnz_per_row|. - int weight_index = 0; - int rhs_indices_index = 0; - int bias_index = 0; - int row = 0; - for (int start : row_starts_) { - while (row < start) { - weight_index += nnz_per_row[row] * block_width_ * block_height_; - rhs_indices_index += nnz_per_row[row]; - bias_index += block_height_; - ++row; - } - weight_starts_.push_back(weight_index); - rhs_indices_starts_.push_back(rhs_indices_index); - bias_starts_.push_back(bias_index); - } -} - -// Computes the block row (reduced) index of the start of each thread. -void ThreadBounds::ComputeThreadSplitPoints(int num_threads, - int reduced_rows_per_cache_row, - int reduced_rows, - const int* nnz_per_row) { - row_starts_.assign(/*n=*/1, /*val=*/0); - // Break the rule if the matrix is too small to allow one per thread, which - // occurs only during tests. - if (reduced_rows_per_cache_row * num_threads > reduced_rows) - reduced_rows_per_cache_row = std::max(reduced_rows / num_threads, 1); - int cache_rows = (reduced_rows + reduced_rows_per_cache_row - 1) / - reduced_rows_per_cache_row; - - // Compute exclusive prefix sum of the amount of work per row. - std::vector work_upto_row(cache_rows + 1, 0); - int extra_row_work = 2 * reduced_rows_per_cache_row; - for (int i = 0; i < cache_rows; ++i) { - int new_nnz = 0; - for (int j = 0; j < reduced_rows_per_cache_row; ++j) { - // if |reduced_rows_per_cache_row| isn't an exact multiple of the - // matrix size, then we need to be careful here. 
- int index = i * reduced_rows_per_cache_row + j; - if (index < reduced_rows) new_nnz += nnz_per_row[index]; - } - work_upto_row[i + 1] = new_nnz + extra_row_work + work_upto_row[i]; - } - int total_work = work_upto_row.back(); - // Find the split point point based on assigned approximately equal amount - // of work for each thread. - int prev_split = 0; - for (int i = 1; i <= num_threads; ++i) { - int split = std::distance( - work_upto_row.begin(), - std::lower_bound(work_upto_row.begin(), work_upto_row.end(), - i * total_work / num_threads)); - int split_row = split * reduced_rows_per_cache_row; - if (i == num_threads) { - split_row = reduced_rows; - } - - VLOG(2) << "tid=" << i - 1 << " num rows=" << split_row - row_starts_.back() - << " work=" << work_upto_row[split] - work_upto_row[prev_split]; - row_starts_.push_back(split_row); - prev_split = split; - } - VLOG(2) << "total rows=" << reduced_rows << " total work=" << total_work; -} - -} // namespace csrblocksparse diff --git a/spaces/omdena-lc/omdena-ng-lagos-chatbot-model/actions/actions.py b/spaces/omdena-lc/omdena-ng-lagos-chatbot-model/actions/actions.py deleted file mode 100644 index 8bf1f757f851343b4bb1c56e40bf7cf9bde717ae..0000000000000000000000000000000000000000 --- a/spaces/omdena-lc/omdena-ng-lagos-chatbot-model/actions/actions.py +++ /dev/null @@ -1,27 +0,0 @@ -# This files contains your custom actions which can be used to run -# custom Python code. -# -# See this guide on how to implement these action: -# https://rasa.com/docs/rasa/custom-actions - - -# This is a simple example for a custom action which utters "Hello World!" - -# from typing import Any, Text, Dict, List -# -# from rasa_sdk import Action, Tracker -# from rasa_sdk.executor import CollectingDispatcher -# -# -# class ActionHelloWorld(Action): -# -# def name(self) -> Text: -# return "action_hello_world" -# -# def run(self, dispatcher: CollectingDispatcher, -# tracker: Tracker, -# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]: -# -# dispatcher.utter_message(text="Hello World!") -# -# return [] diff --git a/spaces/openkg/llm_leaderboard/KBQA_eval.py b/spaces/openkg/llm_leaderboard/KBQA_eval.py deleted file mode 100644 index f509f4950c2195c4f8c9be29ce4f9c60d5ac2388..0000000000000000000000000000000000000000 --- a/spaces/openkg/llm_leaderboard/KBQA_eval.py +++ /dev/null @@ -1,163 +0,0 @@ -import json -import re -from tqdm import tqdm - -def remove_punctuation(text): - pattern = r'[^\w\s]' - cleaned_text = re.sub(pattern, '', text) - return cleaned_text - - -def get_result_sing(data_input, gold_json, dataset): - # with open(input_dir, "r", encoding="utf-8") as file: - # data_input = json.load(file) - data_test = [line for line in data_input if line["dataset"] == dataset] - with open(gold_json, "r", encoding="utf-8") as file: - data_gold = json.load(file) - - if dataset == "LC-quad2": - with open("./kbqa_files/be_removed_id.json", "r", encoding='utf-8') as file: - removed_list = json.load(file) - data_test = [data_line for data_line in data_test if int(data_line["id"]) not in [int(i) for i in removed_list]] - print(dataset, ": start") - - flag_list = [] - for line_test in tqdm(data_test): - flag = 0 - line_test_id, line_test_answer = line_test["id"], line_test["answer"] - if "ans" in data_gold[0].keys(): - for i in data_gold: - if int(i["ID"]) == int(line_test_id): - line_true_answer_list = i["ans"] - data_gold.remove(i) - break - # line_true_answer_list = [i for i in data_gold if int(i["ID"]) == int(line_test_id)][0]["ans"] - elif "answer" in 
data_gold[0].keys(): - for i in data_gold: - if int(i["ID"]) == int(line_test_id): - line_true_answer_list = i["answer"] - data_gold.remove(i) - break - # line_true_answer_list = [i for i in data_gold if int(i["ID"]) == int(line_test_id)][0]["answer"] - - # 针对LC数据集的优化 - if dataset == "LC-quad2": - if "True" in line_true_answer_list: - line_true_answer_list.append("yes") - if "False" in line_true_answer_list: - line_true_answer_list.append("no") - - if type(line_true_answer_list) == list: - for line_true_answer in line_true_answer_list: - if str(line_true_answer) in str(line_test_answer): - flag = 1 - break - else: - if str(line_true_answer_list) in str(line_test_answer): - flag = 1 - - flag_list.append(flag) - return (sum(flag_list)) / len(data_test) - - -def get_result_QALD(data_input, gold_json): - # with open(input_dir, encoding="utf-8") as file: - # data_input = json.load(file) - # data_MKQA = [line for line in data_input if line["dataset"] == "MKQA"] - with open(gold_json, "r", encoding="utf-8") as file: - data_gold = json.load(file) - flag_list = [] - - print("QALD : start") - for line_true in tqdm(data_gold): - flag = 0 - line_true_id, line_true_answer_list = line_true["ID"], line_true["ans"] - line_test_answer_list = [line_test["answer"] for line_test in data_input if - int(line_test["id"]) == int(line_true_id)] - for line_test_answer in line_test_answer_list: - for line_true_answer in line_true_answer_list: - if str(line_true_answer) in str(line_test_answer): - flag = 1 - break - if flag == 1: - break - flag_list.append(flag) - return (sum(flag_list) / len(data_gold)) - - -def get_result_MKQA(data_input, gold_json): - # with open(input_dir, "r", encoding="utf-8") as file: - # data_input = json.load(file) - data_MKQA = [line for line in data_input if line["dataset"] == "MKQA"] - with open(gold_json, "r", encoding="utf-8") as file: - data_gold = json.load(file) - flag_list = [] - print("MKQA : start") - for line_test in data_MKQA: - flag = 0 - line_test_language, line_test_id, line_test_answer = line_test["language"], line_test["id"], line_test["answer"] - line_true_answer_aliases = \ - [line_true for line_true in data_gold if line_true["id"] == line_test_id][0]["answers"][line_test_language] - # 经检验len(line_true_answer_aliases)==2的答案都有问题 - if len(line_true_answer_aliases) == 1: - line_true_answer = line_true_answer_aliases[0]["text"] - line_true_answer_list = [line_true_answer] - try: - line_true_aliases = line_true_answer_aliases[0]["aliases"] - line_true_answer_list.extend(line_true_aliases) - except: - pass - line_test_answer = remove_punctuation(line_test_answer.lower().replace('_', ' ')) - line_true_answer_list = [remove_punctuation(i.lower().replace('_', ' ')) for i in line_true_answer_list] - # 判定synonyms中是否有任意元素属于line_test - for i in line_true_answer_list: - if i in line_test_answer: - flag = 1 - break - - flag_list.append(flag) - - return sum(flag_list) / len(data_MKQA) - - - -def get_KBQA_eval_result(data_input): - dic_gold_json = { - "CWQ": "CWQ_all_question_with_label.json" - ,"GrailQA": "GarilQA_all_question_with_label.json" - ,"GraphQ":"GraphQuestions_all_question_with_label.json" - ,"KQApro":"KQAPro_all_question_with_label.json" - ,"LC-quad2":"LC_quad2_all_question_with_label_unanswerabel_be_removed_.json" - ,"MKQA":"mkqa_sample_label_final_processed.json" - ,"QALD-9":"QALD-9_all_question_with_ans_label.json" - ,"WQSP":"WQSP_all_question_with_label.json" - } - - result_dic = {} - for key in dic_gold_json.keys(): - gold_json = "./kbqa_files/"+dic_gold_json[key] - if 
key == "MKQA": - try: - result = get_result_MKQA(data_input=data_input, gold_json=gold_json) - except: - result = "-" - elif key == "QALD-9": - try: - result = get_result_QALD(data_input=data_input, gold_json=gold_json) - except: - result = "-" - else: - #result = get_result_sing(input_dir=input_dir, gold_json=gold_json, dataset=key) - try: - result = get_result_sing(data_input=data_input, gold_json=gold_json,dataset=key) - except: - result = "-" - result_dic[key] = result - return result_dic - -if __name__=='__main__': - input_dir = r"KBQA_eval_sample.json" - with open(input_dir, "r", encoding="utf-8") as file: - data_input = json.load(file) - print(get_KBQA_eval_result(data_input)) - diff --git a/spaces/owaiskha9654/Custom_Yolov7/utils/torch_utils.py b/spaces/owaiskha9654/Custom_Yolov7/utils/torch_utils.py deleted file mode 100644 index 1e631b555508457a4944c11a479176463719c0e8..0000000000000000000000000000000000000000 --- a/spaces/owaiskha9654/Custom_Yolov7/utils/torch_utils.py +++ /dev/null @@ -1,374 +0,0 @@ -# YOLOR PyTorch utils - -import datetime -import logging -import math -import os -import platform -import subprocess -import time -from contextlib import contextmanager -from copy import deepcopy -from pathlib import Path - -import torch -import torch.backends.cudnn as cudnn -import torch.nn as nn -import torch.nn.functional as F -import torchvision - -try: - import thop # for FLOPS computation -except ImportError: - thop = None -logger = logging.getLogger(__name__) - - -@contextmanager -def torch_distributed_zero_first(local_rank: int): - """ - Decorator to make all processes in distributed training wait for each local_master to do something. - """ - if local_rank not in [-1, 0]: - torch.distributed.barrier() - yield - if local_rank == 0: - torch.distributed.barrier() - - -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.benchmark, cudnn.deterministic = False, True - else: # faster, less reproducible - cudnn.benchmark, cudnn.deterministic = True, False - - -def date_modified(path=__file__): - # return human-readable file modification date, i.e. '2021-3-26' - t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def git_describe(path=Path(__file__).parent): # path must be a directory - # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' - try: - return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError as e: - return '' # not a git repository - - -def select_device(device='', batch_size=None): - # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string - cpu = device.lower() == 'cpu' - if cpu: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False - elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability - - cuda = not cpu and torch.cuda.is_available() - if cuda: - n = torch.cuda.device_count() - if n > 1 and batch_size: # check that batch_size is compatible with device_count - assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * len(s) - for i, d in enumerate(device.split(',') if device else range(n)): - p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB - else: - s += 'CPU\n' - - logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe - return torch.device('cuda:0' if cuda else 'cpu') - - -def time_synchronized(): - # pytorch-accurate time - if torch.cuda.is_available(): - torch.cuda.synchronize() - return time.time() - - -def profile(x, ops, n=100, device=None): - # profile a pytorch module or list of modules. Example usage: - # x = torch.randn(16, 3, 640, 640) # input - # m1 = lambda x: x * torch.sigmoid(x) - # m2 = nn.SiLU() - # profile(x, [m1, m2], n=100) # profile speed over 100 iterations - - device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') - x = x.to(device) - x.requires_grad = True - print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type - dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS - except: - flops = 0 - - for _ in range(n): - t[0] = time_synchronized() - y = m(x) - t[1] = time_synchronized() - try: - _ = y.sum().backward() - t[2] = time_synchronized() - except: # no backward method - t[2] = float('nan') - dtf += (t[1] - t[0]) * 1000 / n # ms per op forward - dtb += (t[2] - t[1]) * 1000 / n # ms per op backward - - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' - p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') - - -def is_parallel(model): - return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) - - -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - -def initialize_weights(model): - for m in model.modules(): - t = type(m) - if t is nn.Conv2d: - pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif t is nn.BatchNorm2d: - m.eps = 1e-3 - m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: - m.inplace = True - - -def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' - return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] - - -def sparsity(model): - # Return global model sparsity - a, b = 0., 0. - for p in model.parameters(): - a += p.numel() - b += (p == 0).sum() - return b / a - - -def prune(model, amount=0.3): - # Prune model to requested global sparsity - import torch.nn.utils.prune as prune - print('Pruning model... ', end='') - for name, m in model.named_modules(): - if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - print(' %.3g global sparsity' % sparsity(model)) - - -def fuse_conv_and_bn(conv, bn): - # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) - - # prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) - - # prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def model_info(model, verbose=False, img_size=640): - # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] - n_p = sum(x.numel() for x in model.parameters()) # number parameters - n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients - if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) - for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - - try: # FLOPS - from thop import profile - stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 - img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS - img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS - except (ImportError, Exception): - fs = '' - - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") - - -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - -def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) - # scales img(bs,3,y,x) by ratio constrained to gs-multiple - if ratio == 1.0: - return img - else: - h, w = img.shape[2:] - s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize - if not same_shape: # pad/crop img - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] - return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: - continue - else: - setattr(a, k, v) - - -class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. 
- """ - - def __init__(self, model, decay=0.9999, updates=0): - # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA - self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def update(self, model): - # Update EMA parameters - with torch.no_grad(): - self.updates += 1 - d = self.decay(self.updates) - - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict - for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: - v *= d - v += (1. - d) * msd[k].detach() - - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): - # Update EMA attributes - copy_attr(self.ema, model, include, exclude) - - -class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): - def _check_input_dim(self, input): - # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc - # is this method that is overwritten by the sub-class - # This original goal of this method was for tensor sanity checks - # If you're ok bypassing those sanity checks (eg. if you trust your inference - # to provide the right dimensional inputs), then you can just use this method - # for easy conversion from SyncBatchNorm - # (unfortunately, SyncBatchNorm does not store the original class - if it did - # we could return the one that was originally created) - return - -def revert_sync_batchnorm(module): - # this is very similar to the function that it is trying to revert: - # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679 - module_output = module - if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm): - new_cls = BatchNormXd - module_output = BatchNormXd(module.num_features, - module.eps, module.momentum, - module.affine, - module.track_running_stats) - if module.affine: - with torch.no_grad(): - module_output.weight = module.weight - module_output.bias = module.bias - module_output.running_mean = module.running_mean - module_output.running_var = module.running_var - module_output.num_batches_tracked = module.num_batches_tracked - if hasattr(module, "qconfig"): - module_output.qconfig = module.qconfig - for name, child in module.named_children(): - module_output.add_module(name, revert_sync_batchnorm(child)) - del module - return module_output - - -class TracedModel(nn.Module): - - def __init__(self, model=None, device=None, img_size=(640,640)): - super(TracedModel, self).__init__() - - print(" Convert model to Traced-model... ") - self.stride = model.stride - self.names = model.names - self.model = model - - self.model = revert_sync_batchnorm(self.model) - self.model.to('cpu') - self.model.eval() - - self.detect_layer = self.model.model[-1] - self.model.traced = True - - rand_example = torch.rand(1, 3, img_size, img_size) - - traced_script_module = torch.jit.trace(self.model, rand_example, strict=False) - #traced_script_module = torch.jit.script(self.model) - traced_script_module.save("traced_model.pt") - print(" traced_script_module saved! ") - self.model = traced_script_module - self.model.to(device) - self.detect_layer.to(device) - print(" model is traced! 
\n") - - def forward(self, x, augment=False, profile=False): - out = self.model(x) - out = self.detect_layer(out) - return out \ No newline at end of file diff --git a/spaces/patilyash22/ChatBotWithOpenAILangChainAndPlayHT/app.py b/spaces/patilyash22/ChatBotWithOpenAILangChainAndPlayHT/app.py deleted file mode 100644 index 08e62697af0a3666bdd8bc96c216fbc4b0bef26c..0000000000000000000000000000000000000000 --- a/spaces/patilyash22/ChatBotWithOpenAILangChainAndPlayHT/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import re -import requests -import json -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') -PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY') -PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID') - -PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID') -play_ht_api_get_audio_url = "https://play.ht/api/v2/tts" - - -template = """Meet Yash, your youthful and witty personal assistant! At 19 years old, he's full of energy and always eager to help. Yash's goal is to assist you with any questions or problems you might have. His enthusiasm shines through in every response, making interactions with her enjoyable and engaging. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -headers = { - "accept": "text/event-stream", - "content-type": "application/json", - "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY, - "X-USER-ID": PLAY_HT_USER_ID -} - - -def get_payload(text): - return { - "text": text, - "voice": PLAY_HT_VOICE_ID, - "quality": "medium", - "output_format": "mp3", - "speed": 1, - "sample_rate": 24000, - "seed": None, - "temperature": None - } - -def get_generated_audio(text): - payload = get_payload(text) - generated_response = {} - try: - response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers) - response.raise_for_status() - generated_response["type"]= 'SUCCESS' - generated_response["response"] = response.text - except requests.exceptions.RequestException as e: - generated_response["type"]= 'ERROR' - try: - response_text = json.loads(response.text) - if response_text['error_message']: - generated_response["response"] = response_text['error_message'] - else: - generated_response["response"] = response.text - except Exception as e: - generated_response["response"] = response.text - except Exception as e: - generated_response["type"]= 'ERROR' - generated_response["response"] = response.text - return generated_response - -def extract_urls(text): - # Define the regex pattern for URLs - url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*' - - # Find all occurrences of URLs in the text - urls = re.findall(url_pattern, text) - - return urls - -def get_audio_reply_for_question(text): - generated_audio_event = get_generated_audio(text) - #From get_generated_audio, you will get events in a string format, from that we need to extract the url - final_response = { - "audio_url": '', - "message": '' - } - if generated_audio_event["type"] == 'SUCCESS': - audio_urls = extract_urls(generated_audio_event["response"]) - if len(audio_urls) == 0: - final_response['message'] = "No audio file link found in generated 
event" - else: - final_response['audio_url'] = audio_urls[-1] - else: - final_response['message'] = generated_audio_event['response'] - return final_response - -def download_url(url): - try: - # Send a GET request to the URL to fetch the content - final_response = { - 'content':'', - 'error':'' - } - response = requests.get(url) - # Check if the request was successful (status code 200) - if response.status_code == 200: - final_response['content'] = response.content - else: - final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}" - except Exception as e: - final_response['error'] = f"Failed to download the URL. Error: {e}" - return final_response - -def get_filename_from_url(url): - # Use os.path.basename() to extract the file name from the URL - file_name = os.path.basename(url) - return file_name - -def get_text_response(user_message): - response = llm_chain.predict(user_message = user_message) - return response - -def get_text_response_and_audio_response(user_message): - response = get_text_response(user_message) # Getting the reply from Open AI - audio_reply_for_question_response = get_audio_reply_for_question(response) - final_response = { - 'output_file_path': '', - 'message':'' - } - audio_url = audio_reply_for_question_response['audio_url'] - if audio_url: - output_file_path=get_filename_from_url(audio_url) - download_url_response = download_url(audio_url) - audio_content = download_url_response['content'] - if audio_content: - with open(output_file_path, "wb") as audio_file: - audio_file.write(audio_content) - final_response['output_file_path'] = output_file_path - else: - final_response['message'] = download_url_response['error'] - else: - final_response['message'] = audio_reply_for_question_response['message'] - return final_response - -def chat_bot_response(message, history): - text_and_audio_response = get_text_response_and_audio_response(message) - output_file_path = text_and_audio_response['output_file_path'] - if output_file_path: - return (text_and_audio_response['output_file_path'],) - else: - return text_and_audio_response['message'] - -demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"]) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/embed_occ.py b/spaces/phyloforfun/VoucherVision/vouchervision/embed_occ.py deleted file mode 100644 index 4eb07156727482c34659a41a928f78e30cb2ca23..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/embed_occ.py +++ /dev/null @@ -1,178 +0,0 @@ -import openai -import os -import sys -import inspect -from tqdm import tqdm -import pandas as pd -import numpy as np -from sklearn.metrics.pairwise import cosine_similarity -import json -import gradio as gr - -currentdir = os.path.dirname(os.path.abspath( - inspect.getfile(inspect.currentframe()))) -parentdir = os.path.dirname(currentdir) -sys.path.append(parentdir) -from vouchervision.general_utils import get_cfg_from_full_path -from prompts import PROMPT_UMICH_skeleton_all_asia -from LLM_chatGPT_3_5 import num_tokens_from_string, OCR_to_dict - -''' -This generates OpenAI embedding. These are no longer used by VoucherVision. 
-We have transitioned to "hkunlp/instructor-xl" - -Please see: https://huggingface.co/hkunlp/instructor-xl - -This file has some experimentation code that can be helpful to reference, -but is no relevant to VoucherVision. -''' - -class GenerateEmbeddings: - def __init__(self, file_occ, file_name, dir_out="D:/D_Desktop/embedding"): - self.file_occ = file_occ - self.file_name = file_name - self.dir_out = dir_out - - self.SEP = '!!' - - # Set API key - dir_home = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) - path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') - cfg_private = get_cfg_from_full_path(path_cfg_private) - openai.api_key = cfg_private['openai']['openai_api_key'] - - def generate(self): - # Read CSV file - df = pd.read_csv(self.file_occ, sep='\t', - on_bad_lines='skip', dtype=str, low_memory=False) - - # Extract headers separately - dwc_headers = df.columns.tolist() - - # Combine columns into a single string separated by commas - df['combined'] = df.apply( - lambda row: self.SEP.join(row.values.astype(str)), axis=1) - - # Wrap the get_embedding function call with tqdm progress bar - tqdm.pandas(desc="Generating embeddings") - df['ada_embedding'] = df.combined.progress_apply( - lambda x: self.get_embedding(x, model='text-embedding-ada-002')) - - # Save to output CSV - output_file = os.path.join( - self.dir_out, f'embedded_dwc__{self.file_name}.csv') - df[['combined', 'ada_embedding']].to_csv(output_file, index=False) - - # Save headers to a separate CSV file - headers_file = os.path.join( - self.dir_out, f'dwc_headers__{self.file_name}.csv') - with open(headers_file, 'w') as f: - f.write('\n'.join(dwc_headers)) - - return output_file - - def get_embedding(self, text, model="text-embedding-ada-002"): - text = text.replace("\n", " ") - return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding'] - - def load_embedded_csv(self, csv_path): - df = pd.read_csv(csv_path) - df['ada_embedding'] = df.ada_embedding.apply(eval).apply(np.array) - - headers_file = os.path.join( - self.dir_out, f'dwc_headers__{self.file_name}.csv') - with open(headers_file, 'r') as f: - dwc_headers = f.read().splitlines() - - return df, dwc_headers - - def search_rows(self, dwc_headers, df, query, n=3, pprint=True): - query_embedding = self.get_embedding( - query, model="text-embedding-ada-002") - df["similarity"] = df.ada_embedding.apply( - lambda x: cosine_similarity([x], [query_embedding])[0][0]) - - results = df.sort_values("similarity", ascending=False).head(n) - - if pprint: - for i in range(n): - row = results.iloc[i] - df_split = pd.DataFrame( - [row.combined.split(self.SEP)], columns=dwc_headers) - df_clean = df_split.replace( - 'nan', np.nan).dropna(axis=1, how='any') - # Convert df_clean to a dictionary - row_dict = df_clean.to_dict(orient='records')[0] - - # Convert dictionary to a long literal string - row_string = json.dumps(row_dict) - - print(row_string) - # print(df_clean) - # print(df_clean.to_string(index=False)) - - nt = num_tokens_from_string(row_string, "cl100k_base") - print(nt) - - return results - - -def create_embeddings(file_occ, file_name, dir_out): - # Instantiate and generate embeddings - embedder = GenerateEmbeddings(file_occ, file_name, dir_out) - output_file = embedder.generate() - - -def old_method(img_path): - set_rules = """1. Your job is to return a new dict based on the structure of the reference dict ref_dict and these are your rules. - 2. 
You must look at ref_dict and refactor the new text called OCR to match the same formatting. - 3. OCR contains unstructured text, use your knowledge to put the OCR text into the correct ref_dict column. - 4. If there is a field that does not have a direct proxy in the OCR text, you can fill it in based on your knowledge, but you cannot generate new information. - 5. The dict key is the column header, the value is the new text. The separator in the new text is '!!', which indicates a new element but not strictly a new column. Remove the '!!' separator before adding text to the new dict - 6. Never put text from the ref_dict values into the new dict, but you must use the headers from ref_dict. - 7. There cannot be duplicate dictionary fields. - 8. Only return the new dict, do not explain your answer.""" - - # 4. If there is a simple typo you should correct the spelling, but do not rephrase or rework the ORC text. - sample_text = """['gbifID', 'abstract', 'accessRights', 'accrualMethod', 'accrualPeriodicity', 'accrualPolicy', 'alternative', 'audience', 'available', 'bibliographicCitation', 'conformsTo', 'contributor', 'coverage', 'created', 'creator', 'date', 'dateAccepted', 'dateCopyrighted', 'dateSubmitted', 'description', 'educationLevel', 'extent', 'format', 'hasFormat', 'hasPart', 'hasVersion', 'identifier', 'instructionalMethod', 'isFormatOf', 'isPartOf', 'isReferencedBy', 'isReplacedBy', 'isRequiredBy', 'isVersionOf', 'issued', 'language', 'license', 'mediator', 'medium', 'modified', 'provenance', 'publisher', 'references', 'relation', 'replaces', 'requires', 'rights', 'rightsHolder', 'source', 'spatial', 'subject', 'tableOfContents', 'temporal', 'title', 'type', 'valid', 'institutionID', 'collectionID', 'datasetID', 'institutionCode', 'collectionCode', 'datasetName', 'ownerInstitutionCode', 'basisOfRecord', 'informationWithheld', 'dataGeneralizations', 'dynamicProperties', 'occurrenceID', 'catalogNumber', 'recordNumber', 'recordedBy', 'recordedByID', 'individualCount', 'organismQuantity', 'organismQuantityType', 'sex', 'lifeStage', 'reproductiveCondition', 'behavior', 'establishmentMeans', 'degreeOfEstablishment', 'pathway', 'georeferenceVerificationStatus', 'occurrenceStatus', 'preparations', 'disposition', 'associatedOccurrences', 'associatedReferences', 'associatedSequences', 'associatedTaxa', 'otherCatalogNumbers', 'occurrenceRemarks', 'organismID', 'organismName', 'organismScope', 'associatedOrganisms', 'previousIdentifications', 'organismRemarks', 'materialSampleID', 'eventID', 'parentEventID', 'fieldNumber', 'eventDate', 'eventTime', 'startDayOfYear', 'endDayOfYear', 'year', 'month', 'day', 'verbatimEventDate', 'habitat', 'samplingProtocol', 'sampleSizeValue', 'sampleSizeUnit', 'samplingEffort', 'fieldNotes', 'eventRemarks', 'locationID', 'higherGeographyID', 'higherGeography', 'continent', 'waterBody', 'islandGroup', 'island', 'countryCode', 'stateProvince', 'county', 'municipality', 'locality', 'verbatimLocality', 'verbatimElevation', 'verticalDatum', 'verbatimDepth', 'minimumDistanceAboveSurfaceInMeters', 'maximumDistanceAboveSurfaceInMeters', 'locationAccordingTo', 'locationRemarks', 'decimalLatitude', 'decimalLongitude', 'coordinateUncertaintyInMeters', 'coordinatePrecision', 'pointRadiusSpatialFit', 'verbatimCoordinateSystem', 'verbatimSRS', 'footprintWKT', 'footprintSRS', 'footprintSpatialFit', 'georeferencedBy', 'georeferencedDate', 'georeferenceProtocol', 'georeferenceSources', 'georeferenceRemarks', 'geologicalContextID', 'earliestEonOrLowestEonothem', 
'latestEonOrHighestEonothem', 'earliestEraOrLowestErathem', 'latestEraOrHighestErathem', 'earliestPeriodOrLowestSystem', 'latestPeriodOrHighestSystem', 'earliestEpochOrLowestSeries', 'latestEpochOrHighestSeries', 'earliestAgeOrLowestStage', 'latestAgeOrHighestStage', 'lowestBiostratigraphicZone', 'highestBiostratigraphicZone', 'lithostratigraphicTerms', 'group', 'formation', 'member', 'bed', 'identificationID', 'verbatimIdentification', 'identificationQualifier', 'typeStatus', 'identifiedBy', 'identifiedByID', 'dateIdentified', 'identificationReferences', 'identificationVerificationStatus', 'identificationRemarks', 'taxonID', 'scientificNameID', 'acceptedNameUsageID', 'parentNameUsageID', 'originalNameUsageID', 'nameAccordingToID', 'namePublishedInID', 'taxonConceptID', 'scientificName', 'acceptedNameUsage', 'parentNameUsage', 'originalNameUsage', 'nameAccordingTo', 'namePublishedIn', 'namePublishedInYear', 'higherClassification', 'kingdom', 'phylum', 'class', 'order', 'family', 'subfamily', 'genus', 'genericName', 'subgenus', 'infragenericEpithet', 'specificEpithet', 'infraspecificEpithet', 'cultivarEpithet', 'taxonRank', 'verbatimTaxonRank', 'vernacularName', 'nomenclaturalCode', 'taxonomicStatus', 'nomenclaturalStatus', 'taxonRemarks', 'datasetKey', 'publishingCountry', 'lastInterpreted', 'elevation', 'elevationAccuracy', 'depth', 'depthAccuracy', 'distanceAboveSurface', 'distanceAboveSurfaceAccuracy', 'issue', 'mediaType', 'hasCoordinate', 'hasGeospatialIssues', 'taxonKey', 'acceptedTaxonKey', 'kingdomKey', 'phylumKey', 'classKey', 'orderKey', 'familyKey', 'genusKey', 'subgenusKey', 'speciesKey', 'species', 'acceptedScientificName', 'verbatimScientificName', 'typifiedName', 'protocol', 'lastParsed', 'lastCrawled', 'repatriated', 'relativeOrganismQuantity', 'level0Gid', 'level0Name', 'level1Gid', 'level1Name', 'level2Gid', 'level2Name', 'level3Gid', 'level3Name', 'iucnRedListCategory']\n3898509458,nan,http://rightsstatements.org/vocab/CNE/1.0/,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,2605588,nan,nan,nan,nan,nan,nan,nan,nan,nan,CC0_1_0,nan,nan,2022-08-15T08:30:45Z,nan,nan,https://portal.neherbaria.org/portal/collections/individual/index.php?occid=2605588,nan,nan,nan,nan,Mohonk Preserve,nan,nan,nan,nan,nan,nan,nan,nan,nan,745e5369-ba4e-4b80-b4b7-d64ab309e7b7,nan,Mohonk Preserve,DSRC,nan,nan,PRESERVED_SPECIMEN,nan,nan,nan,f2d1ba77-1c4d-41f6-8569-50becee5e9c3,MOH002237,nan,Dan Smiley,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,PRESENT,nan,nan,nan,nan,nan,nan,nan,The Buff,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,1971-10-21T00:00:00,nan,294,nan,1971,10,21,10/21/71,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,US,New York,nan,nan,Mohonk Lake,nan,nan,nan,nan,nan,nan,nan,nan,41.772115,-74.153723,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,830036,nan,nan,nan,nan,nan,nan,nan,Populus tremuloides Michx.,nan,nan,nan,nan,nan,nan,Plantae|Charophyta|Streptophytina|Equisetopsida|Magnoliidae|Malpighiales|Salicaceae|Populus,Plantae,Tracheophyta,Magnoliopsida,Malpighiales,Salicaceae,nan,Populus,Populus,nan,nan,tremuloides,nan,nan,SPECIES,nan,nan,nan,ACCEPTED,nan,nan,ffe1030d-42d1-4bb5-8400-1123cc859a5a,US,2022-11-29T23:03:56.952Z,nan,nan,nan,nan,nan,nan,GEODETIC_DATUM_ASSUMED_WGS84;AMBIGUOUS_COLLECTION;INSTITUTION_MATCH_FUZZY,StillImage,true,false,3040215,3040215,6,7707728,220,1414,6664,3040183,nan,3040215,Populus 
tremuloides,Populus tremuloides Michx.,Populus tremuloides,nan,DWC_ARCHIVE,2022-11-29T23:03:56.952Z,2022-11-29T23:02:54.980Z,false,nan,USA,United States,USA.33_1,New York,USA.33.57_1,Ulster,nan,nan,LC""" - sample_text_headers = """['gbifID', 'abstract', 'accessRights', 'accrualMethod', 'accrualPeriodicity', 'accrualPolicy', 'alternative', 'audience', 'available', 'bibliographicCitation', 'conformsTo', 'contributor', 'coverage', 'created', 'creator', 'date', 'dateAccepted', 'dateCopyrighted', 'dateSubmitted', 'description', 'educationLevel', 'extent', 'format', 'hasFormat', 'hasPart', 'hasVersion', 'identifier', 'instructionalMethod', 'isFormatOf', 'isPartOf', 'isReferencedBy', 'isReplacedBy', 'isRequiredBy', 'isVersionOf', 'issued', 'language', 'license', 'mediator', 'medium', 'modified', 'provenance', 'publisher', 'references', 'relation', 'replaces', 'requires', 'rights', 'rightsHolder', 'source', 'spatial', 'subject', 'tableOfContents', 'temporal', 'title', 'type', 'valid', 'institutionID', 'collectionID', 'datasetID', 'institutionCode', 'collectionCode', 'datasetName', 'ownerInstitutionCode', 'basisOfRecord', 'informationWithheld', 'dataGeneralizations', 'dynamicProperties', 'occurrenceID', 'catalogNumber', 'recordNumber', 'recordedBy', 'recordedByID', 'individualCount', 'organismQuantity', 'organismQuantityType', 'sex', 'lifeStage', 'reproductiveCondition', 'behavior', 'establishmentMeans', 'degreeOfEstablishment', 'pathway', 'georeferenceVerificationStatus', 'occurrenceStatus', 'preparations', 'disposition', 'associatedOccurrences', 'associatedReferences', 'associatedSequences', 'associatedTaxa', 'otherCatalogNumbers', 'occurrenceRemarks', 'organismID', 'organismName', 'organismScope', 'associatedOrganisms', 'previousIdentifications', 'organismRemarks', 'materialSampleID', 'eventID', 'parentEventID', 'fieldNumber', 'eventDate', 'eventTime', 'startDayOfYear', 'endDayOfYear', 'year', 'month', 'day', 'verbatimEventDate', 'habitat', 'samplingProtocol', 'sampleSizeValue', 'sampleSizeUnit', 'samplingEffort', 'fieldNotes', 'eventRemarks', 'locationID', 'higherGeographyID', 'higherGeography', 'continent', 'waterBody', 'islandGroup', 'island', 'countryCode', 'stateProvince', 'county', 'municipality', 'locality', 'verbatimLocality', 'verbatimElevation', 'verticalDatum', 'verbatimDepth', 'minimumDistanceAboveSurfaceInMeters', 'maximumDistanceAboveSurfaceInMeters', 'locationAccordingTo', 'locationRemarks', 'decimalLatitude', 'decimalLongitude', 'coordinateUncertaintyInMeters', 'coordinatePrecision', 'pointRadiusSpatialFit', 'verbatimCoordinateSystem', 'verbatimSRS', 'footprintWKT', 'footprintSRS', 'footprintSpatialFit', 'georeferencedBy', 'georeferencedDate', 'georeferenceProtocol', 'georeferenceSources', 'georeferenceRemarks', 'geologicalContextID', 'earliestEonOrLowestEonothem', 'latestEonOrHighestEonothem', 'earliestEraOrLowestErathem', 'latestEraOrHighestErathem', 'earliestPeriodOrLowestSystem', 'latestPeriodOrHighestSystem', 'earliestEpochOrLowestSeries', 'latestEpochOrHighestSeries', 'earliestAgeOrLowestStage', 'latestAgeOrHighestStage', 'lowestBiostratigraphicZone', 'highestBiostratigraphicZone', 'lithostratigraphicTerms', 'group', 'formation', 'member', 'bed', 'identificationID', 'verbatimIdentification', 'identificationQualifier', 'typeStatus', 'identifiedBy', 'identifiedByID', 'dateIdentified', 'identificationReferences', 'identificationVerificationStatus', 'identificationRemarks', 'taxonID', 'scientificNameID', 'acceptedNameUsageID', 'parentNameUsageID', 'originalNameUsageID', 
'nameAccordingToID', 'namePublishedInID', 'taxonConceptID', 'scientificName', 'acceptedNameUsage', 'parentNameUsage', 'originalNameUsage', 'nameAccordingTo', 'namePublishedIn', 'namePublishedInYear', 'higherClassification', 'kingdom', 'phylum', 'class', 'order', 'family', 'subfamily', 'genus', 'genericName', 'subgenus', 'infragenericEpithet', 'specificEpithet', 'infraspecificEpithet', 'cultivarEpithet', 'taxonRank', 'verbatimTaxonRank', 'vernacularName', 'nomenclaturalCode', 'taxonomicStatus', 'nomenclaturalStatus', 'taxonRemarks', 'datasetKey', 'publishingCountry', 'lastInterpreted', 'elevation', 'elevationAccuracy', 'depth', 'depthAccuracy', 'distanceAboveSurface', 'distanceAboveSurfaceAccuracy', 'issue', 'mediaType', 'hasCoordinate', 'hasGeospatialIssues', 'taxonKey', 'acceptedTaxonKey', 'kingdomKey', 'phylumKey', 'classKey', 'orderKey', 'familyKey', 'genusKey', 'subgenusKey', 'speciesKey', 'species', 'acceptedScientificName', 'verbatimScientificName', 'typifiedName', 'protocol', 'lastParsed', 'lastCrawled', 'repatriated', 'relativeOrganismQuantity', 'level0Gid', 'level0Name', 'level1Gid', 'level1Name', 'level2Gid', 'level2Name', 'level3Gid', 'level3Name', 'iucnRedListCategory']""" - - sample_OCR_response = """PLANTS OF BORNEC!! Euphorbiaceae!! Chaetocarpus castanocarpus Thwaites!! Det. JH Beaman, 15 May 2010 !!Sabah: Kota Kinabalu District: Bukit Padang, by UKMS!!temporary campus. Elev. 30 m. Eroded hills and gullies.!!scattered scrubby vegetation; Crocker Formation. Shrub.!!Lat. 5°58 N. Long. 116°06 E!!John H. Beaman 83041!!August 1983!!with Willem Meijer!!HERBARIA OF UNIVERSITI KEBANGSAAN MALAYSIA (UKMS) and!!MICHIGAN STATE UNIVERSITY (MSC)!!""" - sample_dict = """{"gbifID": "3898509458", "accessRights": "http://rightsstatements.org/vocab/CNE/1.0/", "identifier": "2605588", "license": "CC0_1_0", "modified": "2022-08-15T08:30:45Z", "references": "https://portal.neherbaria.org/portal/collections/individual/index.php?occid=2605588", "rightsHolder": "Mohonk Preserve", "collectionID": "745e5369-ba4e-4b80-b4b7-d64ab309e7b7", "institutionCode": "Mohonk Preserve", "collectionCode": "DSRC", "basisOfRecord": "PRESERVED_SPECIMEN", "occurrenceID": "f2d1ba77-1c4d-41f6-8569-50becee5e9c3", "catalogNumber": "MOH002237", "recordedBy": "Dan Smiley", "occurrenceStatus": "PRESENT", "occurrenceRemarks": "The Buff", "eventDate": "1971-10-21T00:00:00", "startDayOfYear": "294", "year": "1971", "month": "10", "day": "21", "verbatimEventDate": "10/21/71", "countryCode": "US", "stateProvince": "New York", "locality": "Mohonk Lake", "decimalLatitude": "41.772115", "decimalLongitude": "-74.153723", "taxonID": "830036", "scientificName": "Populus tremuloides Michx.", "higherClassification": "Plantae|Charophyta|Streptophytina|Equisetopsida|Magnoliidae|Malpighiales|Salicaceae|Populus", "kingdom": "Plantae", "phylum": "Tracheophyta", "class": "Magnoliopsida", "order": "Malpighiales", "family": "Salicaceae", "genus": "Populus", "genericName": "Populus", "specificEpithet": "tremuloides", "taxonRank": "SPECIES", "taxonomicStatus": "ACCEPTED", "datasetKey": "ffe1030d-42d1-4bb5-8400-1123cc859a5a", "publishingCountry": "US", "lastInterpreted": "2022-11-29T23:03:56.952Z", "issue": "GEODETIC_DATUM_ASSUMED_WGS84;AMBIGUOUS_COLLECTION;INSTITUTION_MATCH_FUZZY", "mediaType": "StillImage", "hasCoordinate": "true", "hasGeospatialIssues": "false", "taxonKey": "3040215", "acceptedTaxonKey": "3040215", "kingdomKey": "6", "phylumKey": "7707728", "classKey": "220", "orderKey": "1414", "familyKey": "6664", "genusKey": "3040183", 
"speciesKey": "3040215", "species": "Populus tremuloides", "acceptedScientificName": "Populus tremuloides Michx.", "verbatimScientificName": "Populus tremuloides", "protocol": "DWC_ARCHIVE", "lastParsed": "2022-11-29T23:03:56.952Z", "lastCrawled": "2022-11-29T23:02:54.980Z", "repatriated": "false", "level0Gid": "USA", "level0Name": "United States", "level1Gid": "USA.33_1", "level1Name": "New York", "level2Gid": "USA.33.57_1", "level2Name": "Ulster", "iucnRedListCategory": "LC"}""" - - nt_rules = num_tokens_from_string(set_rules, "cl100k_base") - nt_dict = num_tokens_from_string(sample_dict, "cl100k_base") - nt_ocr = num_tokens_from_string(sample_OCR_response, "cl100k_base") - - print(f"nt - nt_rules {nt_rules}") - print(f"nt - nt_dict {nt_dict}") - print(f"nt - nt_new {nt_ocr}") - - do_create = False - - file_occ = 'D:/Dropbox/LeafMachine2/leafmachine2/transcription/test_occ/occurrence_short.txt' - file_name = 'test_occ' - dir_out = "D:/D_Desktop/embedding" - - ''' - if do_create: - create_embeddings(file_occ, file_name, dir_out) - - - # Load the generated embeddings - output_file = os.path.join(dir_out, f'embedded_dwc__{file_name}.csv') - embedder = GenerateEmbeddings(file_occ, file_name, dir_out) - embedded_df, dwc_headers = embedder.load_embedded_csv(output_file) - - # Search for reviews - search_query = "1971 The Buff" - results = embedder.search_rows(dwc_headers, embedded_df, search_query, n=1) - print(results) - ''' - GPT_response = OCR_to_dict(img_path) - print(GPT_response) - - -if __name__ == '__main__': - print() - diff --git a/spaces/pikto/ELITE-ChatGPT-Streamlit-2/backupapp.py b/spaces/pikto/ELITE-ChatGPT-Streamlit-2/backupapp.py deleted file mode 100644 index 20a36a522a47cdfcf39d7aeb06f9a9ee1ab1efbb..0000000000000000000000000000000000000000 --- a/spaces/pikto/ELITE-ChatGPT-Streamlit-2/backupapp.py +++ /dev/null @@ -1,152 +0,0 @@ -import streamlit as st -import openai -import os -import base64 -import glob -import json -import mistune -import pytz -import textwrap - -from datetime import datetime -from openai import ChatCompletion -from xml.etree import ElementTree as ET -from bs4 import BeautifulSoup - -openai.api_key = os.getenv('OPENAI_KEY') -st.set_page_config( - page_title="GPT Streamlit Document Reasoner", - layout="wide") - -menu = ["txt", "htm", "md", "py"] -choice = st.sidebar.selectbox("Choose output file type to save results", menu) -choicePrefix = "Output and download file set to " -if choice == "txt": - st.sidebar.write(choicePrefix + "Text file.") -elif choice == "htm": - st.sidebar.write(choicePrefix + "HTML5.") -elif choice == "md": - st.sidebar.write(choicePrefix + "Markdown.") -elif choice == "py": - st.sidebar.write(choicePrefix + "Python Code.") - -max_length = st.sidebar.slider("Max document length", min_value=1000, max_value=32000, value=2000, step=1000) - -def truncate_document(document, length): - return document[:length] - -def chat_with_model(prompts): - model = "gpt-3.5-turbo" - conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}] - conversation.extend([{'role': 'user', 'content': prompt} for prompt in prompts]) - response = openai.ChatCompletion.create(model=model, messages=conversation) - return response['choices'][0]['message']['content'] - -def generate_filename(prompt, file_type): - central = pytz.timezone('US/Central') - safe_date_time = datetime.now(central).strftime("%m%d_%I%M") - safe_prompt = "".join(x for x in prompt if x.isalnum())[:28] - return f"{safe_date_time}_{safe_prompt}.{file_type}" - -def 
create_file(filename, prompt, response): - if filename.endswith(".txt"): - with open(filename, 'w') as file: - file.write(f"Prompt:\n{prompt}\nResponse:\n{response}") - elif filename.endswith(".htm"): - with open(filename, 'w') as file: - file.write(f"Prompt: {prompt} Response: {response}
    ") - elif filename.endswith(".md"): - with open(filename, 'w') as file: - file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}") - -def get_table_download_link(file_path): - with open(file_path, 'r') as file: - data = file.read() - b64 = base64.b64encode(data.encode()).decode() - file_name = os.path.basename(file_path) - ext = os.path.splitext(file_name)[1] - - if ext == '.txt': - mime_type = 'text/plain' - elif ext == '.htm': - mime_type = 'text/html' - elif ext == '.md': - mime_type = 'text/markdown' - else: - mime_type = 'application/octet-stream' - - href = f'{file_name}' - return href - -def CompressXML(xml_text): - root = ET.fromstring(xml_text) - for elem in list(root.iter()): - if isinstance(elem.tag, str) and 'Comment' in elem.tag: - elem.parent.remove(elem) - return ET.tostring(root, encoding='unicode', method="xml") - -def read_file_content(file,max_length): - if file.type == "application/json": - content = json.load(file) - return str(content) - elif file.type == "text/html" or file.type == "text/htm": - content = BeautifulSoup(file, "html.parser") - return content.text - elif file.type == "application/xml" or file.type == "text/xml": - tree = ET.parse(file) - root = tree.getroot() - xml = CompressXML(ET.tostring(root, encoding='unicode')) - return xml - elif file.type == "text/markdown" or file.type == "text/md": - md = mistune.create_markdown() - content = md(file.read().decode()) - return content - elif file.type == "text/plain": - return file.getvalue().decode() - else: - return "" - -def main(): - user_prompt = st.text_area("Your question:", '', height=120) - uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "html", "htm", "md", "txt"]) - - if st.button('💬 Chat'): - st.write('Thinking and Reasoning with your inputs...') - file_content = "" - - if user_prompt: - prompts = textwrap.wrap(user_prompt, max_length) - for prompt in prompts: - response = chat_with_model([prompt]) - st.write('Response:') - st.write(response) - filename = generate_filename(prompt, choice) - create_file(filename, prompt, response) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - if uploaded_file is not None: - file_content = read_file_content(uploaded_file, max_length) - document_parts = textwrap.wrap(file_content, max_length) - for part in document_parts: - response = chat_with_model([part]) - st.write('Response:') - st.write(response) - filename = generate_filename(part, choice) - create_file(filename, part, response) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - if len(file_content) > 0: - st.text_area("File Content:", file_content, height=400) # Display file content in a scrollable text box - - all_files = glob.glob("*.txt") + glob.glob("*.htm") + glob.glob("*.md") - for file in all_files: - col1, col2 = st.sidebar.columns([4,1]) - with col1: - st.markdown(get_table_download_link(file), unsafe_allow_html=True) - with col2: - if st.button("🗑", key=file): - os.remove(file) - st.experimental_rerun() - -if __name__ == "__main__": - main() diff --git a/spaces/pikto/Elite-freegpt-webui/client/js/highlightjs-copy.min.js b/spaces/pikto/Elite-freegpt-webui/client/js/highlightjs-copy.min.js deleted file mode 100644 index ac11d33ec06e396c96b887494d9164a9b3996bef..0000000000000000000000000000000000000000 --- a/spaces/pikto/Elite-freegpt-webui/client/js/highlightjs-copy.min.js +++ /dev/null @@ -1 +0,0 @@ -class 
CopyButtonPlugin{constructor(options={}){self.hook=options.hook;self.callback=options.callback}"after:highlightElement"({el,text}){let button=Object.assign(document.createElement("button"),{innerHTML:"Copy",className:"hljs-copy-button"});button.dataset.copied=false;el.parentElement.classList.add("hljs-copy-wrapper");el.parentElement.appendChild(button);el.parentElement.style.setProperty("--hljs-theme-background",window.getComputedStyle(el).backgroundColor);button.onclick=function(){if(!navigator.clipboard)return;let newText=text;if(hook&&typeof hook==="function"){newText=hook(text,el)||text}navigator.clipboard.writeText(newText).then(function(){button.innerHTML="Copied!";button.dataset.copied=true;let alert=Object.assign(document.createElement("div"),{role:"status",className:"hljs-copy-alert",innerHTML:"Copied to clipboard"});el.parentElement.appendChild(alert);setTimeout(()=>{button.innerHTML="Copy";button.dataset.copied=false;el.parentElement.removeChild(alert);alert=null},2e3)}).then(function(){if(typeof callback==="function")return callback(newText,el)})}}} \ No newline at end of file diff --git a/spaces/pixiou/bingo/src/components/ui/sheet.tsx b/spaces/pixiou/bingo/src/components/ui/sheet.tsx deleted file mode 100644 index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000 --- a/spaces/pixiou/bingo/src/components/ui/sheet.tsx +++ /dev/null @@ -1,122 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SheetPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Sheet = SheetPrimitive.Root - -const SheetTrigger = SheetPrimitive.Trigger - -const SheetClose = SheetPrimitive.Close - -const SheetPortal = ({ - className, - children, - ...props -}: SheetPrimitive.DialogPortalProps) => ( - - {children} - -) -SheetPortal.displayName = SheetPrimitive.Portal.displayName - -const SheetOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -SheetOverlay.displayName = SheetPrimitive.Overlay.displayName - -const SheetContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - {children} - - - Close - - - -)) -SheetContent.displayName = SheetPrimitive.Content.displayName - -const SheetHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetHeader.displayName = 'SheetHeader' - -const SheetFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetFooter.displayName = 'SheetFooter' - -const SheetTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetTitle.displayName = SheetPrimitive.Title.displayName - -const SheetDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetDescription.displayName = SheetPrimitive.Description.displayName - -export { - Sheet, - SheetTrigger, - SheetClose, - SheetContent, - SheetHeader, - SheetFooter, - SheetTitle, - SheetDescription -} diff --git a/spaces/pkiage/time_series_decomposition_demo/app.py b/spaces/pkiage/time_series_decomposition_demo/app.py deleted file mode 100644 index 04a9aa7ecb9e559916dec40f5d36ad23a7d7be96..0000000000000000000000000000000000000000 --- a/spaces/pkiage/time_series_decomposition_demo/app.py +++ /dev/null @@ -1,85 +0,0 @@ -import streamlit as st -import pandas as pd -from src.data.utils import * -from src.visualization.visualize import * -from src.features.build_features import * - - -def main(): - - st.title("Time Series Decomposition Demo") - - st.header("Data") - - sample_data_selected = st.selectbox( - 'Select sample data:', data_set_options) - - data, graph_data = import_sample_data( - sample_data_selected, data_set_options) - - show_inputted_dataframe(data) - - with st.expander("Box Plot:"): - time_series_box_plot(graph_data) - - with st.expander("Dist Plot (histogram and violin plot):"): - time_series_violin_and_box_plot(data) - - st.header("Time series decomposition") - - [decomposition, selected_model_type] = decompose_time_series(data) - - if selected_model_type == model_types[0]: - st.subheader('Additive Model') - st.latex(r''' - Y[t] = T[t]+S[t]+e[t] - ''') - - if selected_model_type == model_types[1]: - st.subheader('Multiplicative Model') - st.latex(r''' - Y[t] = T[t] \times S[t] \times e[t] - ''') - - standard_decomposition_plot(decomposition) - - [trend, seasonal, residual] = extract_trend_seasonal_resid(decomposition) - - with st.expander("Time series Line Plot (Y[t])"): - time_series_line_plot(data) - - st.latex(r'''=''') - - with st.expander("Trend Plot (T[t])"): - st.write('The trend component of the data series.') - st.write('Trend: secular variation(long-term, non-periodic variation)') - - time_series_line_plot(trend) - - if selected_model_type == model_types[0]: - st.latex(r'''+''') - - if selected_model_type == model_types[1]: - st.latex(r'''\times''') - - with st.expander("Seasonality Plot (S[t])"): - st.write('The seasonal component of the data series.') - st.write( - 'Seasonality: Periodic fluctuations (often at short-term intervals less than a year).') - time_series_line_plot(seasonal) - - if selected_model_type == model_types[0]: - st.latex(r'''+''') - - if selected_model_type == model_types[1]: - st.latex(r'''\times''') - - with st.expander("Residual Plot (e[t])"): - st.write('The residual component of the data series.') - st.write('Residual: What remains after the other components have been removed (describes random, irregular influences).') - st.write(f'Residual mean: {residual.mean():.4f}') - time_series_scatter_plot(residual) - - -if __name__ == "__main__": - main() diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/idna/uts46data.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/idna/uts46data.py deleted file mode 100644 index 186796c17b25c1e766112ef4d9f16bb2dea4b306..0000000000000000000000000000000000000000 
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/idna/uts46data.py +++ /dev/null @@ -1,8600 +0,0 @@ -# This file is automatically generated by tools/idna-data -# vim: set fileencoding=utf-8 : - -from typing import List, Tuple, Union - - -"""IDNA Mapping Table from UTS46.""" - - -__version__ = '15.0.0' -def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x0, '3'), - (0x1, '3'), - (0x2, '3'), - (0x3, '3'), - (0x4, '3'), - (0x5, '3'), - (0x6, '3'), - (0x7, '3'), - (0x8, '3'), - (0x9, '3'), - (0xA, '3'), - (0xB, '3'), - (0xC, '3'), - (0xD, '3'), - (0xE, '3'), - (0xF, '3'), - (0x10, '3'), - (0x11, '3'), - (0x12, '3'), - (0x13, '3'), - (0x14, '3'), - (0x15, '3'), - (0x16, '3'), - (0x17, '3'), - (0x18, '3'), - (0x19, '3'), - (0x1A, '3'), - (0x1B, '3'), - (0x1C, '3'), - (0x1D, '3'), - (0x1E, '3'), - (0x1F, '3'), - (0x20, '3'), - (0x21, '3'), - (0x22, '3'), - (0x23, '3'), - (0x24, '3'), - (0x25, '3'), - (0x26, '3'), - (0x27, '3'), - (0x28, '3'), - (0x29, '3'), - (0x2A, '3'), - (0x2B, '3'), - (0x2C, '3'), - (0x2D, 'V'), - (0x2E, 'V'), - (0x2F, '3'), - (0x30, 'V'), - (0x31, 'V'), - (0x32, 'V'), - (0x33, 'V'), - (0x34, 'V'), - (0x35, 'V'), - (0x36, 'V'), - (0x37, 'V'), - (0x38, 'V'), - (0x39, 'V'), - (0x3A, '3'), - (0x3B, '3'), - (0x3C, '3'), - (0x3D, '3'), - (0x3E, '3'), - (0x3F, '3'), - (0x40, '3'), - (0x41, 'M', 'a'), - (0x42, 'M', 'b'), - (0x43, 'M', 'c'), - (0x44, 'M', 'd'), - (0x45, 'M', 'e'), - (0x46, 'M', 'f'), - (0x47, 'M', 'g'), - (0x48, 'M', 'h'), - (0x49, 'M', 'i'), - (0x4A, 'M', 'j'), - (0x4B, 'M', 'k'), - (0x4C, 'M', 'l'), - (0x4D, 'M', 'm'), - (0x4E, 'M', 'n'), - (0x4F, 'M', 'o'), - (0x50, 'M', 'p'), - (0x51, 'M', 'q'), - (0x52, 'M', 'r'), - (0x53, 'M', 's'), - (0x54, 'M', 't'), - (0x55, 'M', 'u'), - (0x56, 'M', 'v'), - (0x57, 'M', 'w'), - (0x58, 'M', 'x'), - (0x59, 'M', 'y'), - (0x5A, 'M', 'z'), - (0x5B, '3'), - (0x5C, '3'), - (0x5D, '3'), - (0x5E, '3'), - (0x5F, '3'), - (0x60, '3'), - (0x61, 'V'), - (0x62, 'V'), - (0x63, 'V'), - ] - -def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x64, 'V'), - (0x65, 'V'), - (0x66, 'V'), - (0x67, 'V'), - (0x68, 'V'), - (0x69, 'V'), - (0x6A, 'V'), - (0x6B, 'V'), - (0x6C, 'V'), - (0x6D, 'V'), - (0x6E, 'V'), - (0x6F, 'V'), - (0x70, 'V'), - (0x71, 'V'), - (0x72, 'V'), - (0x73, 'V'), - (0x74, 'V'), - (0x75, 'V'), - (0x76, 'V'), - (0x77, 'V'), - (0x78, 'V'), - (0x79, 'V'), - (0x7A, 'V'), - (0x7B, '3'), - (0x7C, '3'), - (0x7D, '3'), - (0x7E, '3'), - (0x7F, '3'), - (0x80, 'X'), - (0x81, 'X'), - (0x82, 'X'), - (0x83, 'X'), - (0x84, 'X'), - (0x85, 'X'), - (0x86, 'X'), - (0x87, 'X'), - (0x88, 'X'), - (0x89, 'X'), - (0x8A, 'X'), - (0x8B, 'X'), - (0x8C, 'X'), - (0x8D, 'X'), - (0x8E, 'X'), - (0x8F, 'X'), - (0x90, 'X'), - (0x91, 'X'), - (0x92, 'X'), - (0x93, 'X'), - (0x94, 'X'), - (0x95, 'X'), - (0x96, 'X'), - (0x97, 'X'), - (0x98, 'X'), - (0x99, 'X'), - (0x9A, 'X'), - (0x9B, 'X'), - (0x9C, 'X'), - (0x9D, 'X'), - (0x9E, 'X'), - (0x9F, 'X'), - (0xA0, '3', ' '), - (0xA1, 'V'), - (0xA2, 'V'), - (0xA3, 'V'), - (0xA4, 'V'), - (0xA5, 'V'), - (0xA6, 'V'), - (0xA7, 'V'), - (0xA8, '3', ' ̈'), - (0xA9, 'V'), - (0xAA, 'M', 'a'), - (0xAB, 'V'), - (0xAC, 'V'), - (0xAD, 'I'), - (0xAE, 'V'), - (0xAF, '3', ' ̄'), - (0xB0, 'V'), - (0xB1, 'V'), - (0xB2, 'M', '2'), - (0xB3, 'M', '3'), - (0xB4, '3', ' ́'), - (0xB5, 'M', 'μ'), - (0xB6, 'V'), - (0xB7, 'V'), - (0xB8, '3', ' ̧'), - (0xB9, 'M', '1'), - (0xBA, 'M', 'o'), - (0xBB, 'V'), - (0xBC, 'M', '1⁄4'), - (0xBD, 'M', '1⁄2'), - (0xBE, 'M', 
'3⁄4'), - (0xBF, 'V'), - (0xC0, 'M', 'à'), - (0xC1, 'M', 'á'), - (0xC2, 'M', 'â'), - (0xC3, 'M', 'ã'), - (0xC4, 'M', 'ä'), - (0xC5, 'M', 'å'), - (0xC6, 'M', 'æ'), - (0xC7, 'M', 'ç'), - ] - -def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC8, 'M', 'è'), - (0xC9, 'M', 'é'), - (0xCA, 'M', 'ê'), - (0xCB, 'M', 'ë'), - (0xCC, 'M', 'ì'), - (0xCD, 'M', 'í'), - (0xCE, 'M', 'î'), - (0xCF, 'M', 'ï'), - (0xD0, 'M', 'ð'), - (0xD1, 'M', 'ñ'), - (0xD2, 'M', 'ò'), - (0xD3, 'M', 'ó'), - (0xD4, 'M', 'ô'), - (0xD5, 'M', 'õ'), - (0xD6, 'M', 'ö'), - (0xD7, 'V'), - (0xD8, 'M', 'ø'), - (0xD9, 'M', 'ù'), - (0xDA, 'M', 'ú'), - (0xDB, 'M', 'û'), - (0xDC, 'M', 'ü'), - (0xDD, 'M', 'ý'), - (0xDE, 'M', 'þ'), - (0xDF, 'D', 'ss'), - (0xE0, 'V'), - (0xE1, 'V'), - (0xE2, 'V'), - (0xE3, 'V'), - (0xE4, 'V'), - (0xE5, 'V'), - (0xE6, 'V'), - (0xE7, 'V'), - (0xE8, 'V'), - (0xE9, 'V'), - (0xEA, 'V'), - (0xEB, 'V'), - (0xEC, 'V'), - (0xED, 'V'), - (0xEE, 'V'), - (0xEF, 'V'), - (0xF0, 'V'), - (0xF1, 'V'), - (0xF2, 'V'), - (0xF3, 'V'), - (0xF4, 'V'), - (0xF5, 'V'), - (0xF6, 'V'), - (0xF7, 'V'), - (0xF8, 'V'), - (0xF9, 'V'), - (0xFA, 'V'), - (0xFB, 'V'), - (0xFC, 'V'), - (0xFD, 'V'), - (0xFE, 'V'), - (0xFF, 'V'), - (0x100, 'M', 'ā'), - (0x101, 'V'), - (0x102, 'M', 'ă'), - (0x103, 'V'), - (0x104, 'M', 'ą'), - (0x105, 'V'), - (0x106, 'M', 'ć'), - (0x107, 'V'), - (0x108, 'M', 'ĉ'), - (0x109, 'V'), - (0x10A, 'M', 'ċ'), - (0x10B, 'V'), - (0x10C, 'M', 'č'), - (0x10D, 'V'), - (0x10E, 'M', 'ď'), - (0x10F, 'V'), - (0x110, 'M', 'đ'), - (0x111, 'V'), - (0x112, 'M', 'ē'), - (0x113, 'V'), - (0x114, 'M', 'ĕ'), - (0x115, 'V'), - (0x116, 'M', 'ė'), - (0x117, 'V'), - (0x118, 'M', 'ę'), - (0x119, 'V'), - (0x11A, 'M', 'ě'), - (0x11B, 'V'), - (0x11C, 'M', 'ĝ'), - (0x11D, 'V'), - (0x11E, 'M', 'ğ'), - (0x11F, 'V'), - (0x120, 'M', 'ġ'), - (0x121, 'V'), - (0x122, 'M', 'ģ'), - (0x123, 'V'), - (0x124, 'M', 'ĥ'), - (0x125, 'V'), - (0x126, 'M', 'ħ'), - (0x127, 'V'), - (0x128, 'M', 'ĩ'), - (0x129, 'V'), - (0x12A, 'M', 'ī'), - (0x12B, 'V'), - ] - -def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x12C, 'M', 'ĭ'), - (0x12D, 'V'), - (0x12E, 'M', 'į'), - (0x12F, 'V'), - (0x130, 'M', 'i̇'), - (0x131, 'V'), - (0x132, 'M', 'ij'), - (0x134, 'M', 'ĵ'), - (0x135, 'V'), - (0x136, 'M', 'ķ'), - (0x137, 'V'), - (0x139, 'M', 'ĺ'), - (0x13A, 'V'), - (0x13B, 'M', 'ļ'), - (0x13C, 'V'), - (0x13D, 'M', 'ľ'), - (0x13E, 'V'), - (0x13F, 'M', 'l·'), - (0x141, 'M', 'ł'), - (0x142, 'V'), - (0x143, 'M', 'ń'), - (0x144, 'V'), - (0x145, 'M', 'ņ'), - (0x146, 'V'), - (0x147, 'M', 'ň'), - (0x148, 'V'), - (0x149, 'M', 'ʼn'), - (0x14A, 'M', 'ŋ'), - (0x14B, 'V'), - (0x14C, 'M', 'ō'), - (0x14D, 'V'), - (0x14E, 'M', 'ŏ'), - (0x14F, 'V'), - (0x150, 'M', 'ő'), - (0x151, 'V'), - (0x152, 'M', 'œ'), - (0x153, 'V'), - (0x154, 'M', 'ŕ'), - (0x155, 'V'), - (0x156, 'M', 'ŗ'), - (0x157, 'V'), - (0x158, 'M', 'ř'), - (0x159, 'V'), - (0x15A, 'M', 'ś'), - (0x15B, 'V'), - (0x15C, 'M', 'ŝ'), - (0x15D, 'V'), - (0x15E, 'M', 'ş'), - (0x15F, 'V'), - (0x160, 'M', 'š'), - (0x161, 'V'), - (0x162, 'M', 'ţ'), - (0x163, 'V'), - (0x164, 'M', 'ť'), - (0x165, 'V'), - (0x166, 'M', 'ŧ'), - (0x167, 'V'), - (0x168, 'M', 'ũ'), - (0x169, 'V'), - (0x16A, 'M', 'ū'), - (0x16B, 'V'), - (0x16C, 'M', 'ŭ'), - (0x16D, 'V'), - (0x16E, 'M', 'ů'), - (0x16F, 'V'), - (0x170, 'M', 'ű'), - (0x171, 'V'), - (0x172, 'M', 'ų'), - (0x173, 'V'), - (0x174, 'M', 'ŵ'), - (0x175, 'V'), - (0x176, 'M', 'ŷ'), - (0x177, 'V'), - (0x178, 'M', 'ÿ'), - (0x179, 'M', 'ź'), - (0x17A, 'V'), - (0x17B, 
'M', 'ż'), - (0x17C, 'V'), - (0x17D, 'M', 'ž'), - (0x17E, 'V'), - (0x17F, 'M', 's'), - (0x180, 'V'), - (0x181, 'M', 'ɓ'), - (0x182, 'M', 'ƃ'), - (0x183, 'V'), - (0x184, 'M', 'ƅ'), - (0x185, 'V'), - (0x186, 'M', 'ɔ'), - (0x187, 'M', 'ƈ'), - (0x188, 'V'), - (0x189, 'M', 'ɖ'), - (0x18A, 'M', 'ɗ'), - (0x18B, 'M', 'ƌ'), - (0x18C, 'V'), - (0x18E, 'M', 'ǝ'), - (0x18F, 'M', 'ə'), - (0x190, 'M', 'ɛ'), - (0x191, 'M', 'ƒ'), - (0x192, 'V'), - (0x193, 'M', 'ɠ'), - ] - -def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x194, 'M', 'ɣ'), - (0x195, 'V'), - (0x196, 'M', 'ɩ'), - (0x197, 'M', 'ɨ'), - (0x198, 'M', 'ƙ'), - (0x199, 'V'), - (0x19C, 'M', 'ɯ'), - (0x19D, 'M', 'ɲ'), - (0x19E, 'V'), - (0x19F, 'M', 'ɵ'), - (0x1A0, 'M', 'ơ'), - (0x1A1, 'V'), - (0x1A2, 'M', 'ƣ'), - (0x1A3, 'V'), - (0x1A4, 'M', 'ƥ'), - (0x1A5, 'V'), - (0x1A6, 'M', 'ʀ'), - (0x1A7, 'M', 'ƨ'), - (0x1A8, 'V'), - (0x1A9, 'M', 'ʃ'), - (0x1AA, 'V'), - (0x1AC, 'M', 'ƭ'), - (0x1AD, 'V'), - (0x1AE, 'M', 'ʈ'), - (0x1AF, 'M', 'ư'), - (0x1B0, 'V'), - (0x1B1, 'M', 'ʊ'), - (0x1B2, 'M', 'ʋ'), - (0x1B3, 'M', 'ƴ'), - (0x1B4, 'V'), - (0x1B5, 'M', 'ƶ'), - (0x1B6, 'V'), - (0x1B7, 'M', 'ʒ'), - (0x1B8, 'M', 'ƹ'), - (0x1B9, 'V'), - (0x1BC, 'M', 'ƽ'), - (0x1BD, 'V'), - (0x1C4, 'M', 'dž'), - (0x1C7, 'M', 'lj'), - (0x1CA, 'M', 'nj'), - (0x1CD, 'M', 'ǎ'), - (0x1CE, 'V'), - (0x1CF, 'M', 'ǐ'), - (0x1D0, 'V'), - (0x1D1, 'M', 'ǒ'), - (0x1D2, 'V'), - (0x1D3, 'M', 'ǔ'), - (0x1D4, 'V'), - (0x1D5, 'M', 'ǖ'), - (0x1D6, 'V'), - (0x1D7, 'M', 'ǘ'), - (0x1D8, 'V'), - (0x1D9, 'M', 'ǚ'), - (0x1DA, 'V'), - (0x1DB, 'M', 'ǜ'), - (0x1DC, 'V'), - (0x1DE, 'M', 'ǟ'), - (0x1DF, 'V'), - (0x1E0, 'M', 'ǡ'), - (0x1E1, 'V'), - (0x1E2, 'M', 'ǣ'), - (0x1E3, 'V'), - (0x1E4, 'M', 'ǥ'), - (0x1E5, 'V'), - (0x1E6, 'M', 'ǧ'), - (0x1E7, 'V'), - (0x1E8, 'M', 'ǩ'), - (0x1E9, 'V'), - (0x1EA, 'M', 'ǫ'), - (0x1EB, 'V'), - (0x1EC, 'M', 'ǭ'), - (0x1ED, 'V'), - (0x1EE, 'M', 'ǯ'), - (0x1EF, 'V'), - (0x1F1, 'M', 'dz'), - (0x1F4, 'M', 'ǵ'), - (0x1F5, 'V'), - (0x1F6, 'M', 'ƕ'), - (0x1F7, 'M', 'ƿ'), - (0x1F8, 'M', 'ǹ'), - (0x1F9, 'V'), - (0x1FA, 'M', 'ǻ'), - (0x1FB, 'V'), - (0x1FC, 'M', 'ǽ'), - (0x1FD, 'V'), - (0x1FE, 'M', 'ǿ'), - (0x1FF, 'V'), - (0x200, 'M', 'ȁ'), - (0x201, 'V'), - (0x202, 'M', 'ȃ'), - (0x203, 'V'), - (0x204, 'M', 'ȅ'), - (0x205, 'V'), - (0x206, 'M', 'ȇ'), - (0x207, 'V'), - (0x208, 'M', 'ȉ'), - (0x209, 'V'), - (0x20A, 'M', 'ȋ'), - (0x20B, 'V'), - (0x20C, 'M', 'ȍ'), - ] - -def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x20D, 'V'), - (0x20E, 'M', 'ȏ'), - (0x20F, 'V'), - (0x210, 'M', 'ȑ'), - (0x211, 'V'), - (0x212, 'M', 'ȓ'), - (0x213, 'V'), - (0x214, 'M', 'ȕ'), - (0x215, 'V'), - (0x216, 'M', 'ȗ'), - (0x217, 'V'), - (0x218, 'M', 'ș'), - (0x219, 'V'), - (0x21A, 'M', 'ț'), - (0x21B, 'V'), - (0x21C, 'M', 'ȝ'), - (0x21D, 'V'), - (0x21E, 'M', 'ȟ'), - (0x21F, 'V'), - (0x220, 'M', 'ƞ'), - (0x221, 'V'), - (0x222, 'M', 'ȣ'), - (0x223, 'V'), - (0x224, 'M', 'ȥ'), - (0x225, 'V'), - (0x226, 'M', 'ȧ'), - (0x227, 'V'), - (0x228, 'M', 'ȩ'), - (0x229, 'V'), - (0x22A, 'M', 'ȫ'), - (0x22B, 'V'), - (0x22C, 'M', 'ȭ'), - (0x22D, 'V'), - (0x22E, 'M', 'ȯ'), - (0x22F, 'V'), - (0x230, 'M', 'ȱ'), - (0x231, 'V'), - (0x232, 'M', 'ȳ'), - (0x233, 'V'), - (0x23A, 'M', 'ⱥ'), - (0x23B, 'M', 'ȼ'), - (0x23C, 'V'), - (0x23D, 'M', 'ƚ'), - (0x23E, 'M', 'ⱦ'), - (0x23F, 'V'), - (0x241, 'M', 'ɂ'), - (0x242, 'V'), - (0x243, 'M', 'ƀ'), - (0x244, 'M', 'ʉ'), - (0x245, 'M', 'ʌ'), - (0x246, 'M', 'ɇ'), - (0x247, 'V'), - (0x248, 'M', 'ɉ'), - (0x249, 'V'), - (0x24A, 'M', 'ɋ'), - 
(0x24B, 'V'), - (0x24C, 'M', 'ɍ'), - (0x24D, 'V'), - (0x24E, 'M', 'ɏ'), - (0x24F, 'V'), - (0x2B0, 'M', 'h'), - (0x2B1, 'M', 'ɦ'), - (0x2B2, 'M', 'j'), - (0x2B3, 'M', 'r'), - (0x2B4, 'M', 'ɹ'), - (0x2B5, 'M', 'ɻ'), - (0x2B6, 'M', 'ʁ'), - (0x2B7, 'M', 'w'), - (0x2B8, 'M', 'y'), - (0x2B9, 'V'), - (0x2D8, '3', ' ̆'), - (0x2D9, '3', ' ̇'), - (0x2DA, '3', ' ̊'), - (0x2DB, '3', ' ̨'), - (0x2DC, '3', ' ̃'), - (0x2DD, '3', ' ̋'), - (0x2DE, 'V'), - (0x2E0, 'M', 'ɣ'), - (0x2E1, 'M', 'l'), - (0x2E2, 'M', 's'), - (0x2E3, 'M', 'x'), - (0x2E4, 'M', 'ʕ'), - (0x2E5, 'V'), - (0x340, 'M', '̀'), - (0x341, 'M', '́'), - (0x342, 'V'), - (0x343, 'M', '̓'), - (0x344, 'M', '̈́'), - (0x345, 'M', 'ι'), - (0x346, 'V'), - (0x34F, 'I'), - (0x350, 'V'), - (0x370, 'M', 'ͱ'), - (0x371, 'V'), - (0x372, 'M', 'ͳ'), - (0x373, 'V'), - (0x374, 'M', 'ʹ'), - (0x375, 'V'), - (0x376, 'M', 'ͷ'), - (0x377, 'V'), - ] - -def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x378, 'X'), - (0x37A, '3', ' ι'), - (0x37B, 'V'), - (0x37E, '3', ';'), - (0x37F, 'M', 'ϳ'), - (0x380, 'X'), - (0x384, '3', ' ́'), - (0x385, '3', ' ̈́'), - (0x386, 'M', 'ά'), - (0x387, 'M', '·'), - (0x388, 'M', 'έ'), - (0x389, 'M', 'ή'), - (0x38A, 'M', 'ί'), - (0x38B, 'X'), - (0x38C, 'M', 'ό'), - (0x38D, 'X'), - (0x38E, 'M', 'ύ'), - (0x38F, 'M', 'ώ'), - (0x390, 'V'), - (0x391, 'M', 'α'), - (0x392, 'M', 'β'), - (0x393, 'M', 'γ'), - (0x394, 'M', 'δ'), - (0x395, 'M', 'ε'), - (0x396, 'M', 'ζ'), - (0x397, 'M', 'η'), - (0x398, 'M', 'θ'), - (0x399, 'M', 'ι'), - (0x39A, 'M', 'κ'), - (0x39B, 'M', 'λ'), - (0x39C, 'M', 'μ'), - (0x39D, 'M', 'ν'), - (0x39E, 'M', 'ξ'), - (0x39F, 'M', 'ο'), - (0x3A0, 'M', 'π'), - (0x3A1, 'M', 'ρ'), - (0x3A2, 'X'), - (0x3A3, 'M', 'σ'), - (0x3A4, 'M', 'τ'), - (0x3A5, 'M', 'υ'), - (0x3A6, 'M', 'φ'), - (0x3A7, 'M', 'χ'), - (0x3A8, 'M', 'ψ'), - (0x3A9, 'M', 'ω'), - (0x3AA, 'M', 'ϊ'), - (0x3AB, 'M', 'ϋ'), - (0x3AC, 'V'), - (0x3C2, 'D', 'σ'), - (0x3C3, 'V'), - (0x3CF, 'M', 'ϗ'), - (0x3D0, 'M', 'β'), - (0x3D1, 'M', 'θ'), - (0x3D2, 'M', 'υ'), - (0x3D3, 'M', 'ύ'), - (0x3D4, 'M', 'ϋ'), - (0x3D5, 'M', 'φ'), - (0x3D6, 'M', 'π'), - (0x3D7, 'V'), - (0x3D8, 'M', 'ϙ'), - (0x3D9, 'V'), - (0x3DA, 'M', 'ϛ'), - (0x3DB, 'V'), - (0x3DC, 'M', 'ϝ'), - (0x3DD, 'V'), - (0x3DE, 'M', 'ϟ'), - (0x3DF, 'V'), - (0x3E0, 'M', 'ϡ'), - (0x3E1, 'V'), - (0x3E2, 'M', 'ϣ'), - (0x3E3, 'V'), - (0x3E4, 'M', 'ϥ'), - (0x3E5, 'V'), - (0x3E6, 'M', 'ϧ'), - (0x3E7, 'V'), - (0x3E8, 'M', 'ϩ'), - (0x3E9, 'V'), - (0x3EA, 'M', 'ϫ'), - (0x3EB, 'V'), - (0x3EC, 'M', 'ϭ'), - (0x3ED, 'V'), - (0x3EE, 'M', 'ϯ'), - (0x3EF, 'V'), - (0x3F0, 'M', 'κ'), - (0x3F1, 'M', 'ρ'), - (0x3F2, 'M', 'σ'), - (0x3F3, 'V'), - (0x3F4, 'M', 'θ'), - (0x3F5, 'M', 'ε'), - (0x3F6, 'V'), - (0x3F7, 'M', 'ϸ'), - (0x3F8, 'V'), - (0x3F9, 'M', 'σ'), - (0x3FA, 'M', 'ϻ'), - (0x3FB, 'V'), - (0x3FD, 'M', 'ͻ'), - (0x3FE, 'M', 'ͼ'), - (0x3FF, 'M', 'ͽ'), - (0x400, 'M', 'ѐ'), - (0x401, 'M', 'ё'), - (0x402, 'M', 'ђ'), - ] - -def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x403, 'M', 'ѓ'), - (0x404, 'M', 'є'), - (0x405, 'M', 'ѕ'), - (0x406, 'M', 'і'), - (0x407, 'M', 'ї'), - (0x408, 'M', 'ј'), - (0x409, 'M', 'љ'), - (0x40A, 'M', 'њ'), - (0x40B, 'M', 'ћ'), - (0x40C, 'M', 'ќ'), - (0x40D, 'M', 'ѝ'), - (0x40E, 'M', 'ў'), - (0x40F, 'M', 'џ'), - (0x410, 'M', 'а'), - (0x411, 'M', 'б'), - (0x412, 'M', 'в'), - (0x413, 'M', 'г'), - (0x414, 'M', 'д'), - (0x415, 'M', 'е'), - (0x416, 'M', 'ж'), - (0x417, 'M', 'з'), - (0x418, 'M', 'и'), - (0x419, 'M', 'й'), - (0x41A, 'M', 'к'), - (0x41B, 'M', 'л'), - 
(0x41C, 'M', 'м'), - (0x41D, 'M', 'н'), - (0x41E, 'M', 'о'), - (0x41F, 'M', 'п'), - (0x420, 'M', 'р'), - (0x421, 'M', 'с'), - (0x422, 'M', 'т'), - (0x423, 'M', 'у'), - (0x424, 'M', 'ф'), - (0x425, 'M', 'х'), - (0x426, 'M', 'ц'), - (0x427, 'M', 'ч'), - (0x428, 'M', 'ш'), - (0x429, 'M', 'щ'), - (0x42A, 'M', 'ъ'), - (0x42B, 'M', 'ы'), - (0x42C, 'M', 'ь'), - (0x42D, 'M', 'э'), - (0x42E, 'M', 'ю'), - (0x42F, 'M', 'я'), - (0x430, 'V'), - (0x460, 'M', 'ѡ'), - (0x461, 'V'), - (0x462, 'M', 'ѣ'), - (0x463, 'V'), - (0x464, 'M', 'ѥ'), - (0x465, 'V'), - (0x466, 'M', 'ѧ'), - (0x467, 'V'), - (0x468, 'M', 'ѩ'), - (0x469, 'V'), - (0x46A, 'M', 'ѫ'), - (0x46B, 'V'), - (0x46C, 'M', 'ѭ'), - (0x46D, 'V'), - (0x46E, 'M', 'ѯ'), - (0x46F, 'V'), - (0x470, 'M', 'ѱ'), - (0x471, 'V'), - (0x472, 'M', 'ѳ'), - (0x473, 'V'), - (0x474, 'M', 'ѵ'), - (0x475, 'V'), - (0x476, 'M', 'ѷ'), - (0x477, 'V'), - (0x478, 'M', 'ѹ'), - (0x479, 'V'), - (0x47A, 'M', 'ѻ'), - (0x47B, 'V'), - (0x47C, 'M', 'ѽ'), - (0x47D, 'V'), - (0x47E, 'M', 'ѿ'), - (0x47F, 'V'), - (0x480, 'M', 'ҁ'), - (0x481, 'V'), - (0x48A, 'M', 'ҋ'), - (0x48B, 'V'), - (0x48C, 'M', 'ҍ'), - (0x48D, 'V'), - (0x48E, 'M', 'ҏ'), - (0x48F, 'V'), - (0x490, 'M', 'ґ'), - (0x491, 'V'), - (0x492, 'M', 'ғ'), - (0x493, 'V'), - (0x494, 'M', 'ҕ'), - (0x495, 'V'), - (0x496, 'M', 'җ'), - (0x497, 'V'), - (0x498, 'M', 'ҙ'), - (0x499, 'V'), - (0x49A, 'M', 'қ'), - (0x49B, 'V'), - (0x49C, 'M', 'ҝ'), - (0x49D, 'V'), - ] - -def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x49E, 'M', 'ҟ'), - (0x49F, 'V'), - (0x4A0, 'M', 'ҡ'), - (0x4A1, 'V'), - (0x4A2, 'M', 'ң'), - (0x4A3, 'V'), - (0x4A4, 'M', 'ҥ'), - (0x4A5, 'V'), - (0x4A6, 'M', 'ҧ'), - (0x4A7, 'V'), - (0x4A8, 'M', 'ҩ'), - (0x4A9, 'V'), - (0x4AA, 'M', 'ҫ'), - (0x4AB, 'V'), - (0x4AC, 'M', 'ҭ'), - (0x4AD, 'V'), - (0x4AE, 'M', 'ү'), - (0x4AF, 'V'), - (0x4B0, 'M', 'ұ'), - (0x4B1, 'V'), - (0x4B2, 'M', 'ҳ'), - (0x4B3, 'V'), - (0x4B4, 'M', 'ҵ'), - (0x4B5, 'V'), - (0x4B6, 'M', 'ҷ'), - (0x4B7, 'V'), - (0x4B8, 'M', 'ҹ'), - (0x4B9, 'V'), - (0x4BA, 'M', 'һ'), - (0x4BB, 'V'), - (0x4BC, 'M', 'ҽ'), - (0x4BD, 'V'), - (0x4BE, 'M', 'ҿ'), - (0x4BF, 'V'), - (0x4C0, 'X'), - (0x4C1, 'M', 'ӂ'), - (0x4C2, 'V'), - (0x4C3, 'M', 'ӄ'), - (0x4C4, 'V'), - (0x4C5, 'M', 'ӆ'), - (0x4C6, 'V'), - (0x4C7, 'M', 'ӈ'), - (0x4C8, 'V'), - (0x4C9, 'M', 'ӊ'), - (0x4CA, 'V'), - (0x4CB, 'M', 'ӌ'), - (0x4CC, 'V'), - (0x4CD, 'M', 'ӎ'), - (0x4CE, 'V'), - (0x4D0, 'M', 'ӑ'), - (0x4D1, 'V'), - (0x4D2, 'M', 'ӓ'), - (0x4D3, 'V'), - (0x4D4, 'M', 'ӕ'), - (0x4D5, 'V'), - (0x4D6, 'M', 'ӗ'), - (0x4D7, 'V'), - (0x4D8, 'M', 'ә'), - (0x4D9, 'V'), - (0x4DA, 'M', 'ӛ'), - (0x4DB, 'V'), - (0x4DC, 'M', 'ӝ'), - (0x4DD, 'V'), - (0x4DE, 'M', 'ӟ'), - (0x4DF, 'V'), - (0x4E0, 'M', 'ӡ'), - (0x4E1, 'V'), - (0x4E2, 'M', 'ӣ'), - (0x4E3, 'V'), - (0x4E4, 'M', 'ӥ'), - (0x4E5, 'V'), - (0x4E6, 'M', 'ӧ'), - (0x4E7, 'V'), - (0x4E8, 'M', 'ө'), - (0x4E9, 'V'), - (0x4EA, 'M', 'ӫ'), - (0x4EB, 'V'), - (0x4EC, 'M', 'ӭ'), - (0x4ED, 'V'), - (0x4EE, 'M', 'ӯ'), - (0x4EF, 'V'), - (0x4F0, 'M', 'ӱ'), - (0x4F1, 'V'), - (0x4F2, 'M', 'ӳ'), - (0x4F3, 'V'), - (0x4F4, 'M', 'ӵ'), - (0x4F5, 'V'), - (0x4F6, 'M', 'ӷ'), - (0x4F7, 'V'), - (0x4F8, 'M', 'ӹ'), - (0x4F9, 'V'), - (0x4FA, 'M', 'ӻ'), - (0x4FB, 'V'), - (0x4FC, 'M', 'ӽ'), - (0x4FD, 'V'), - (0x4FE, 'M', 'ӿ'), - (0x4FF, 'V'), - (0x500, 'M', 'ԁ'), - (0x501, 'V'), - (0x502, 'M', 'ԃ'), - ] - -def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x503, 'V'), - (0x504, 'M', 'ԅ'), - (0x505, 'V'), - (0x506, 'M', 'ԇ'), - (0x507, 'V'), - (0x508, 
'M', 'ԉ'), - (0x509, 'V'), - (0x50A, 'M', 'ԋ'), - (0x50B, 'V'), - (0x50C, 'M', 'ԍ'), - (0x50D, 'V'), - (0x50E, 'M', 'ԏ'), - (0x50F, 'V'), - (0x510, 'M', 'ԑ'), - (0x511, 'V'), - (0x512, 'M', 'ԓ'), - (0x513, 'V'), - (0x514, 'M', 'ԕ'), - (0x515, 'V'), - (0x516, 'M', 'ԗ'), - (0x517, 'V'), - (0x518, 'M', 'ԙ'), - (0x519, 'V'), - (0x51A, 'M', 'ԛ'), - (0x51B, 'V'), - (0x51C, 'M', 'ԝ'), - (0x51D, 'V'), - (0x51E, 'M', 'ԟ'), - (0x51F, 'V'), - (0x520, 'M', 'ԡ'), - (0x521, 'V'), - (0x522, 'M', 'ԣ'), - (0x523, 'V'), - (0x524, 'M', 'ԥ'), - (0x525, 'V'), - (0x526, 'M', 'ԧ'), - (0x527, 'V'), - (0x528, 'M', 'ԩ'), - (0x529, 'V'), - (0x52A, 'M', 'ԫ'), - (0x52B, 'V'), - (0x52C, 'M', 'ԭ'), - (0x52D, 'V'), - (0x52E, 'M', 'ԯ'), - (0x52F, 'V'), - (0x530, 'X'), - (0x531, 'M', 'ա'), - (0x532, 'M', 'բ'), - (0x533, 'M', 'գ'), - (0x534, 'M', 'դ'), - (0x535, 'M', 'ե'), - (0x536, 'M', 'զ'), - (0x537, 'M', 'է'), - (0x538, 'M', 'ը'), - (0x539, 'M', 'թ'), - (0x53A, 'M', 'ժ'), - (0x53B, 'M', 'ի'), - (0x53C, 'M', 'լ'), - (0x53D, 'M', 'խ'), - (0x53E, 'M', 'ծ'), - (0x53F, 'M', 'կ'), - (0x540, 'M', 'հ'), - (0x541, 'M', 'ձ'), - (0x542, 'M', 'ղ'), - (0x543, 'M', 'ճ'), - (0x544, 'M', 'մ'), - (0x545, 'M', 'յ'), - (0x546, 'M', 'ն'), - (0x547, 'M', 'շ'), - (0x548, 'M', 'ո'), - (0x549, 'M', 'չ'), - (0x54A, 'M', 'պ'), - (0x54B, 'M', 'ջ'), - (0x54C, 'M', 'ռ'), - (0x54D, 'M', 'ս'), - (0x54E, 'M', 'վ'), - (0x54F, 'M', 'տ'), - (0x550, 'M', 'ր'), - (0x551, 'M', 'ց'), - (0x552, 'M', 'ւ'), - (0x553, 'M', 'փ'), - (0x554, 'M', 'ք'), - (0x555, 'M', 'օ'), - (0x556, 'M', 'ֆ'), - (0x557, 'X'), - (0x559, 'V'), - (0x587, 'M', 'եւ'), - (0x588, 'V'), - (0x58B, 'X'), - (0x58D, 'V'), - (0x590, 'X'), - (0x591, 'V'), - (0x5C8, 'X'), - (0x5D0, 'V'), - (0x5EB, 'X'), - (0x5EF, 'V'), - (0x5F5, 'X'), - (0x606, 'V'), - (0x61C, 'X'), - (0x61D, 'V'), - ] - -def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x675, 'M', 'اٴ'), - (0x676, 'M', 'وٴ'), - (0x677, 'M', 'ۇٴ'), - (0x678, 'M', 'يٴ'), - (0x679, 'V'), - (0x6DD, 'X'), - (0x6DE, 'V'), - (0x70E, 'X'), - (0x710, 'V'), - (0x74B, 'X'), - (0x74D, 'V'), - (0x7B2, 'X'), - (0x7C0, 'V'), - (0x7FB, 'X'), - (0x7FD, 'V'), - (0x82E, 'X'), - (0x830, 'V'), - (0x83F, 'X'), - (0x840, 'V'), - (0x85C, 'X'), - (0x85E, 'V'), - (0x85F, 'X'), - (0x860, 'V'), - (0x86B, 'X'), - (0x870, 'V'), - (0x88F, 'X'), - (0x898, 'V'), - (0x8E2, 'X'), - (0x8E3, 'V'), - (0x958, 'M', 'क़'), - (0x959, 'M', 'ख़'), - (0x95A, 'M', 'ग़'), - (0x95B, 'M', 'ज़'), - (0x95C, 'M', 'ड़'), - (0x95D, 'M', 'ढ़'), - (0x95E, 'M', 'फ़'), - (0x95F, 'M', 'य़'), - (0x960, 'V'), - (0x984, 'X'), - (0x985, 'V'), - (0x98D, 'X'), - (0x98F, 'V'), - (0x991, 'X'), - (0x993, 'V'), - (0x9A9, 'X'), - (0x9AA, 'V'), - (0x9B1, 'X'), - (0x9B2, 'V'), - (0x9B3, 'X'), - (0x9B6, 'V'), - (0x9BA, 'X'), - (0x9BC, 'V'), - (0x9C5, 'X'), - (0x9C7, 'V'), - (0x9C9, 'X'), - (0x9CB, 'V'), - (0x9CF, 'X'), - (0x9D7, 'V'), - (0x9D8, 'X'), - (0x9DC, 'M', 'ড়'), - (0x9DD, 'M', 'ঢ়'), - (0x9DE, 'X'), - (0x9DF, 'M', 'য়'), - (0x9E0, 'V'), - (0x9E4, 'X'), - (0x9E6, 'V'), - (0x9FF, 'X'), - (0xA01, 'V'), - (0xA04, 'X'), - (0xA05, 'V'), - (0xA0B, 'X'), - (0xA0F, 'V'), - (0xA11, 'X'), - (0xA13, 'V'), - (0xA29, 'X'), - (0xA2A, 'V'), - (0xA31, 'X'), - (0xA32, 'V'), - (0xA33, 'M', 'ਲ਼'), - (0xA34, 'X'), - (0xA35, 'V'), - (0xA36, 'M', 'ਸ਼'), - (0xA37, 'X'), - (0xA38, 'V'), - (0xA3A, 'X'), - (0xA3C, 'V'), - (0xA3D, 'X'), - (0xA3E, 'V'), - (0xA43, 'X'), - (0xA47, 'V'), - (0xA49, 'X'), - (0xA4B, 'V'), - (0xA4E, 'X'), - (0xA51, 'V'), - (0xA52, 'X'), - (0xA59, 'M', 'ਖ਼'), - (0xA5A, 'M', 
'ਗ਼'), - (0xA5B, 'M', 'ਜ਼'), - (0xA5C, 'V'), - (0xA5D, 'X'), - ] - -def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA5E, 'M', 'ਫ਼'), - (0xA5F, 'X'), - (0xA66, 'V'), - (0xA77, 'X'), - (0xA81, 'V'), - (0xA84, 'X'), - (0xA85, 'V'), - (0xA8E, 'X'), - (0xA8F, 'V'), - (0xA92, 'X'), - (0xA93, 'V'), - (0xAA9, 'X'), - (0xAAA, 'V'), - (0xAB1, 'X'), - (0xAB2, 'V'), - (0xAB4, 'X'), - (0xAB5, 'V'), - (0xABA, 'X'), - (0xABC, 'V'), - (0xAC6, 'X'), - (0xAC7, 'V'), - (0xACA, 'X'), - (0xACB, 'V'), - (0xACE, 'X'), - (0xAD0, 'V'), - (0xAD1, 'X'), - (0xAE0, 'V'), - (0xAE4, 'X'), - (0xAE6, 'V'), - (0xAF2, 'X'), - (0xAF9, 'V'), - (0xB00, 'X'), - (0xB01, 'V'), - (0xB04, 'X'), - (0xB05, 'V'), - (0xB0D, 'X'), - (0xB0F, 'V'), - (0xB11, 'X'), - (0xB13, 'V'), - (0xB29, 'X'), - (0xB2A, 'V'), - (0xB31, 'X'), - (0xB32, 'V'), - (0xB34, 'X'), - (0xB35, 'V'), - (0xB3A, 'X'), - (0xB3C, 'V'), - (0xB45, 'X'), - (0xB47, 'V'), - (0xB49, 'X'), - (0xB4B, 'V'), - (0xB4E, 'X'), - (0xB55, 'V'), - (0xB58, 'X'), - (0xB5C, 'M', 'ଡ଼'), - (0xB5D, 'M', 'ଢ଼'), - (0xB5E, 'X'), - (0xB5F, 'V'), - (0xB64, 'X'), - (0xB66, 'V'), - (0xB78, 'X'), - (0xB82, 'V'), - (0xB84, 'X'), - (0xB85, 'V'), - (0xB8B, 'X'), - (0xB8E, 'V'), - (0xB91, 'X'), - (0xB92, 'V'), - (0xB96, 'X'), - (0xB99, 'V'), - (0xB9B, 'X'), - (0xB9C, 'V'), - (0xB9D, 'X'), - (0xB9E, 'V'), - (0xBA0, 'X'), - (0xBA3, 'V'), - (0xBA5, 'X'), - (0xBA8, 'V'), - (0xBAB, 'X'), - (0xBAE, 'V'), - (0xBBA, 'X'), - (0xBBE, 'V'), - (0xBC3, 'X'), - (0xBC6, 'V'), - (0xBC9, 'X'), - (0xBCA, 'V'), - (0xBCE, 'X'), - (0xBD0, 'V'), - (0xBD1, 'X'), - (0xBD7, 'V'), - (0xBD8, 'X'), - (0xBE6, 'V'), - (0xBFB, 'X'), - (0xC00, 'V'), - (0xC0D, 'X'), - (0xC0E, 'V'), - (0xC11, 'X'), - (0xC12, 'V'), - (0xC29, 'X'), - (0xC2A, 'V'), - ] - -def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xC3A, 'X'), - (0xC3C, 'V'), - (0xC45, 'X'), - (0xC46, 'V'), - (0xC49, 'X'), - (0xC4A, 'V'), - (0xC4E, 'X'), - (0xC55, 'V'), - (0xC57, 'X'), - (0xC58, 'V'), - (0xC5B, 'X'), - (0xC5D, 'V'), - (0xC5E, 'X'), - (0xC60, 'V'), - (0xC64, 'X'), - (0xC66, 'V'), - (0xC70, 'X'), - (0xC77, 'V'), - (0xC8D, 'X'), - (0xC8E, 'V'), - (0xC91, 'X'), - (0xC92, 'V'), - (0xCA9, 'X'), - (0xCAA, 'V'), - (0xCB4, 'X'), - (0xCB5, 'V'), - (0xCBA, 'X'), - (0xCBC, 'V'), - (0xCC5, 'X'), - (0xCC6, 'V'), - (0xCC9, 'X'), - (0xCCA, 'V'), - (0xCCE, 'X'), - (0xCD5, 'V'), - (0xCD7, 'X'), - (0xCDD, 'V'), - (0xCDF, 'X'), - (0xCE0, 'V'), - (0xCE4, 'X'), - (0xCE6, 'V'), - (0xCF0, 'X'), - (0xCF1, 'V'), - (0xCF4, 'X'), - (0xD00, 'V'), - (0xD0D, 'X'), - (0xD0E, 'V'), - (0xD11, 'X'), - (0xD12, 'V'), - (0xD45, 'X'), - (0xD46, 'V'), - (0xD49, 'X'), - (0xD4A, 'V'), - (0xD50, 'X'), - (0xD54, 'V'), - (0xD64, 'X'), - (0xD66, 'V'), - (0xD80, 'X'), - (0xD81, 'V'), - (0xD84, 'X'), - (0xD85, 'V'), - (0xD97, 'X'), - (0xD9A, 'V'), - (0xDB2, 'X'), - (0xDB3, 'V'), - (0xDBC, 'X'), - (0xDBD, 'V'), - (0xDBE, 'X'), - (0xDC0, 'V'), - (0xDC7, 'X'), - (0xDCA, 'V'), - (0xDCB, 'X'), - (0xDCF, 'V'), - (0xDD5, 'X'), - (0xDD6, 'V'), - (0xDD7, 'X'), - (0xDD8, 'V'), - (0xDE0, 'X'), - (0xDE6, 'V'), - (0xDF0, 'X'), - (0xDF2, 'V'), - (0xDF5, 'X'), - (0xE01, 'V'), - (0xE33, 'M', 'ํา'), - (0xE34, 'V'), - (0xE3B, 'X'), - (0xE3F, 'V'), - (0xE5C, 'X'), - (0xE81, 'V'), - (0xE83, 'X'), - (0xE84, 'V'), - (0xE85, 'X'), - (0xE86, 'V'), - (0xE8B, 'X'), - (0xE8C, 'V'), - (0xEA4, 'X'), - (0xEA5, 'V'), - (0xEA6, 'X'), - (0xEA7, 'V'), - (0xEB3, 'M', 'ໍາ'), - (0xEB4, 'V'), - ] - -def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - 
(0xEBE, 'X'), - (0xEC0, 'V'), - (0xEC5, 'X'), - (0xEC6, 'V'), - (0xEC7, 'X'), - (0xEC8, 'V'), - (0xECF, 'X'), - (0xED0, 'V'), - (0xEDA, 'X'), - (0xEDC, 'M', 'ຫນ'), - (0xEDD, 'M', 'ຫມ'), - (0xEDE, 'V'), - (0xEE0, 'X'), - (0xF00, 'V'), - (0xF0C, 'M', '་'), - (0xF0D, 'V'), - (0xF43, 'M', 'གྷ'), - (0xF44, 'V'), - (0xF48, 'X'), - (0xF49, 'V'), - (0xF4D, 'M', 'ཌྷ'), - (0xF4E, 'V'), - (0xF52, 'M', 'དྷ'), - (0xF53, 'V'), - (0xF57, 'M', 'བྷ'), - (0xF58, 'V'), - (0xF5C, 'M', 'ཛྷ'), - (0xF5D, 'V'), - (0xF69, 'M', 'ཀྵ'), - (0xF6A, 'V'), - (0xF6D, 'X'), - (0xF71, 'V'), - (0xF73, 'M', 'ཱི'), - (0xF74, 'V'), - (0xF75, 'M', 'ཱུ'), - (0xF76, 'M', 'ྲྀ'), - (0xF77, 'M', 'ྲཱྀ'), - (0xF78, 'M', 'ླྀ'), - (0xF79, 'M', 'ླཱྀ'), - (0xF7A, 'V'), - (0xF81, 'M', 'ཱྀ'), - (0xF82, 'V'), - (0xF93, 'M', 'ྒྷ'), - (0xF94, 'V'), - (0xF98, 'X'), - (0xF99, 'V'), - (0xF9D, 'M', 'ྜྷ'), - (0xF9E, 'V'), - (0xFA2, 'M', 'ྡྷ'), - (0xFA3, 'V'), - (0xFA7, 'M', 'ྦྷ'), - (0xFA8, 'V'), - (0xFAC, 'M', 'ྫྷ'), - (0xFAD, 'V'), - (0xFB9, 'M', 'ྐྵ'), - (0xFBA, 'V'), - (0xFBD, 'X'), - (0xFBE, 'V'), - (0xFCD, 'X'), - (0xFCE, 'V'), - (0xFDB, 'X'), - (0x1000, 'V'), - (0x10A0, 'X'), - (0x10C7, 'M', 'ⴧ'), - (0x10C8, 'X'), - (0x10CD, 'M', 'ⴭ'), - (0x10CE, 'X'), - (0x10D0, 'V'), - (0x10FC, 'M', 'ნ'), - (0x10FD, 'V'), - (0x115F, 'X'), - (0x1161, 'V'), - (0x1249, 'X'), - (0x124A, 'V'), - (0x124E, 'X'), - (0x1250, 'V'), - (0x1257, 'X'), - (0x1258, 'V'), - (0x1259, 'X'), - (0x125A, 'V'), - (0x125E, 'X'), - (0x1260, 'V'), - (0x1289, 'X'), - (0x128A, 'V'), - (0x128E, 'X'), - (0x1290, 'V'), - (0x12B1, 'X'), - (0x12B2, 'V'), - (0x12B6, 'X'), - (0x12B8, 'V'), - (0x12BF, 'X'), - (0x12C0, 'V'), - (0x12C1, 'X'), - (0x12C2, 'V'), - (0x12C6, 'X'), - (0x12C8, 'V'), - (0x12D7, 'X'), - (0x12D8, 'V'), - (0x1311, 'X'), - (0x1312, 'V'), - ] - -def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1316, 'X'), - (0x1318, 'V'), - (0x135B, 'X'), - (0x135D, 'V'), - (0x137D, 'X'), - (0x1380, 'V'), - (0x139A, 'X'), - (0x13A0, 'V'), - (0x13F6, 'X'), - (0x13F8, 'M', 'Ᏸ'), - (0x13F9, 'M', 'Ᏹ'), - (0x13FA, 'M', 'Ᏺ'), - (0x13FB, 'M', 'Ᏻ'), - (0x13FC, 'M', 'Ᏼ'), - (0x13FD, 'M', 'Ᏽ'), - (0x13FE, 'X'), - (0x1400, 'V'), - (0x1680, 'X'), - (0x1681, 'V'), - (0x169D, 'X'), - (0x16A0, 'V'), - (0x16F9, 'X'), - (0x1700, 'V'), - (0x1716, 'X'), - (0x171F, 'V'), - (0x1737, 'X'), - (0x1740, 'V'), - (0x1754, 'X'), - (0x1760, 'V'), - (0x176D, 'X'), - (0x176E, 'V'), - (0x1771, 'X'), - (0x1772, 'V'), - (0x1774, 'X'), - (0x1780, 'V'), - (0x17B4, 'X'), - (0x17B6, 'V'), - (0x17DE, 'X'), - (0x17E0, 'V'), - (0x17EA, 'X'), - (0x17F0, 'V'), - (0x17FA, 'X'), - (0x1800, 'V'), - (0x1806, 'X'), - (0x1807, 'V'), - (0x180B, 'I'), - (0x180E, 'X'), - (0x180F, 'I'), - (0x1810, 'V'), - (0x181A, 'X'), - (0x1820, 'V'), - (0x1879, 'X'), - (0x1880, 'V'), - (0x18AB, 'X'), - (0x18B0, 'V'), - (0x18F6, 'X'), - (0x1900, 'V'), - (0x191F, 'X'), - (0x1920, 'V'), - (0x192C, 'X'), - (0x1930, 'V'), - (0x193C, 'X'), - (0x1940, 'V'), - (0x1941, 'X'), - (0x1944, 'V'), - (0x196E, 'X'), - (0x1970, 'V'), - (0x1975, 'X'), - (0x1980, 'V'), - (0x19AC, 'X'), - (0x19B0, 'V'), - (0x19CA, 'X'), - (0x19D0, 'V'), - (0x19DB, 'X'), - (0x19DE, 'V'), - (0x1A1C, 'X'), - (0x1A1E, 'V'), - (0x1A5F, 'X'), - (0x1A60, 'V'), - (0x1A7D, 'X'), - (0x1A7F, 'V'), - (0x1A8A, 'X'), - (0x1A90, 'V'), - (0x1A9A, 'X'), - (0x1AA0, 'V'), - (0x1AAE, 'X'), - (0x1AB0, 'V'), - (0x1ACF, 'X'), - (0x1B00, 'V'), - (0x1B4D, 'X'), - (0x1B50, 'V'), - (0x1B7F, 'X'), - (0x1B80, 'V'), - (0x1BF4, 'X'), - (0x1BFC, 'V'), - (0x1C38, 'X'), - (0x1C3B, 'V'), - 
(0x1C4A, 'X'), - (0x1C4D, 'V'), - (0x1C80, 'M', 'в'), - ] - -def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1C81, 'M', 'д'), - (0x1C82, 'M', 'о'), - (0x1C83, 'M', 'с'), - (0x1C84, 'M', 'т'), - (0x1C86, 'M', 'ъ'), - (0x1C87, 'M', 'ѣ'), - (0x1C88, 'M', 'ꙋ'), - (0x1C89, 'X'), - (0x1C90, 'M', 'ა'), - (0x1C91, 'M', 'ბ'), - (0x1C92, 'M', 'გ'), - (0x1C93, 'M', 'დ'), - (0x1C94, 'M', 'ე'), - (0x1C95, 'M', 'ვ'), - (0x1C96, 'M', 'ზ'), - (0x1C97, 'M', 'თ'), - (0x1C98, 'M', 'ი'), - (0x1C99, 'M', 'კ'), - (0x1C9A, 'M', 'ლ'), - (0x1C9B, 'M', 'მ'), - (0x1C9C, 'M', 'ნ'), - (0x1C9D, 'M', 'ო'), - (0x1C9E, 'M', 'პ'), - (0x1C9F, 'M', 'ჟ'), - (0x1CA0, 'M', 'რ'), - (0x1CA1, 'M', 'ს'), - (0x1CA2, 'M', 'ტ'), - (0x1CA3, 'M', 'უ'), - (0x1CA4, 'M', 'ფ'), - (0x1CA5, 'M', 'ქ'), - (0x1CA6, 'M', 'ღ'), - (0x1CA7, 'M', 'ყ'), - (0x1CA8, 'M', 'შ'), - (0x1CA9, 'M', 'ჩ'), - (0x1CAA, 'M', 'ც'), - (0x1CAB, 'M', 'ძ'), - (0x1CAC, 'M', 'წ'), - (0x1CAD, 'M', 'ჭ'), - (0x1CAE, 'M', 'ხ'), - (0x1CAF, 'M', 'ჯ'), - (0x1CB0, 'M', 'ჰ'), - (0x1CB1, 'M', 'ჱ'), - (0x1CB2, 'M', 'ჲ'), - (0x1CB3, 'M', 'ჳ'), - (0x1CB4, 'M', 'ჴ'), - (0x1CB5, 'M', 'ჵ'), - (0x1CB6, 'M', 'ჶ'), - (0x1CB7, 'M', 'ჷ'), - (0x1CB8, 'M', 'ჸ'), - (0x1CB9, 'M', 'ჹ'), - (0x1CBA, 'M', 'ჺ'), - (0x1CBB, 'X'), - (0x1CBD, 'M', 'ჽ'), - (0x1CBE, 'M', 'ჾ'), - (0x1CBF, 'M', 'ჿ'), - (0x1CC0, 'V'), - (0x1CC8, 'X'), - (0x1CD0, 'V'), - (0x1CFB, 'X'), - (0x1D00, 'V'), - (0x1D2C, 'M', 'a'), - (0x1D2D, 'M', 'æ'), - (0x1D2E, 'M', 'b'), - (0x1D2F, 'V'), - (0x1D30, 'M', 'd'), - (0x1D31, 'M', 'e'), - (0x1D32, 'M', 'ǝ'), - (0x1D33, 'M', 'g'), - (0x1D34, 'M', 'h'), - (0x1D35, 'M', 'i'), - (0x1D36, 'M', 'j'), - (0x1D37, 'M', 'k'), - (0x1D38, 'M', 'l'), - (0x1D39, 'M', 'm'), - (0x1D3A, 'M', 'n'), - (0x1D3B, 'V'), - (0x1D3C, 'M', 'o'), - (0x1D3D, 'M', 'ȣ'), - (0x1D3E, 'M', 'p'), - (0x1D3F, 'M', 'r'), - (0x1D40, 'M', 't'), - (0x1D41, 'M', 'u'), - (0x1D42, 'M', 'w'), - (0x1D43, 'M', 'a'), - (0x1D44, 'M', 'ɐ'), - (0x1D45, 'M', 'ɑ'), - (0x1D46, 'M', 'ᴂ'), - (0x1D47, 'M', 'b'), - (0x1D48, 'M', 'd'), - (0x1D49, 'M', 'e'), - (0x1D4A, 'M', 'ə'), - (0x1D4B, 'M', 'ɛ'), - (0x1D4C, 'M', 'ɜ'), - (0x1D4D, 'M', 'g'), - (0x1D4E, 'V'), - (0x1D4F, 'M', 'k'), - (0x1D50, 'M', 'm'), - (0x1D51, 'M', 'ŋ'), - (0x1D52, 'M', 'o'), - (0x1D53, 'M', 'ɔ'), - ] - -def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D54, 'M', 'ᴖ'), - (0x1D55, 'M', 'ᴗ'), - (0x1D56, 'M', 'p'), - (0x1D57, 'M', 't'), - (0x1D58, 'M', 'u'), - (0x1D59, 'M', 'ᴝ'), - (0x1D5A, 'M', 'ɯ'), - (0x1D5B, 'M', 'v'), - (0x1D5C, 'M', 'ᴥ'), - (0x1D5D, 'M', 'β'), - (0x1D5E, 'M', 'γ'), - (0x1D5F, 'M', 'δ'), - (0x1D60, 'M', 'φ'), - (0x1D61, 'M', 'χ'), - (0x1D62, 'M', 'i'), - (0x1D63, 'M', 'r'), - (0x1D64, 'M', 'u'), - (0x1D65, 'M', 'v'), - (0x1D66, 'M', 'β'), - (0x1D67, 'M', 'γ'), - (0x1D68, 'M', 'ρ'), - (0x1D69, 'M', 'φ'), - (0x1D6A, 'M', 'χ'), - (0x1D6B, 'V'), - (0x1D78, 'M', 'н'), - (0x1D79, 'V'), - (0x1D9B, 'M', 'ɒ'), - (0x1D9C, 'M', 'c'), - (0x1D9D, 'M', 'ɕ'), - (0x1D9E, 'M', 'ð'), - (0x1D9F, 'M', 'ɜ'), - (0x1DA0, 'M', 'f'), - (0x1DA1, 'M', 'ɟ'), - (0x1DA2, 'M', 'ɡ'), - (0x1DA3, 'M', 'ɥ'), - (0x1DA4, 'M', 'ɨ'), - (0x1DA5, 'M', 'ɩ'), - (0x1DA6, 'M', 'ɪ'), - (0x1DA7, 'M', 'ᵻ'), - (0x1DA8, 'M', 'ʝ'), - (0x1DA9, 'M', 'ɭ'), - (0x1DAA, 'M', 'ᶅ'), - (0x1DAB, 'M', 'ʟ'), - (0x1DAC, 'M', 'ɱ'), - (0x1DAD, 'M', 'ɰ'), - (0x1DAE, 'M', 'ɲ'), - (0x1DAF, 'M', 'ɳ'), - (0x1DB0, 'M', 'ɴ'), - (0x1DB1, 'M', 'ɵ'), - (0x1DB2, 'M', 'ɸ'), - (0x1DB3, 'M', 'ʂ'), - (0x1DB4, 'M', 'ʃ'), - (0x1DB5, 'M', 'ƫ'), - (0x1DB6, 'M', 
'ʉ'), - (0x1DB7, 'M', 'ʊ'), - (0x1DB8, 'M', 'ᴜ'), - (0x1DB9, 'M', 'ʋ'), - (0x1DBA, 'M', 'ʌ'), - (0x1DBB, 'M', 'z'), - (0x1DBC, 'M', 'ʐ'), - (0x1DBD, 'M', 'ʑ'), - (0x1DBE, 'M', 'ʒ'), - (0x1DBF, 'M', 'θ'), - (0x1DC0, 'V'), - (0x1E00, 'M', 'ḁ'), - (0x1E01, 'V'), - (0x1E02, 'M', 'ḃ'), - (0x1E03, 'V'), - (0x1E04, 'M', 'ḅ'), - (0x1E05, 'V'), - (0x1E06, 'M', 'ḇ'), - (0x1E07, 'V'), - (0x1E08, 'M', 'ḉ'), - (0x1E09, 'V'), - (0x1E0A, 'M', 'ḋ'), - (0x1E0B, 'V'), - (0x1E0C, 'M', 'ḍ'), - (0x1E0D, 'V'), - (0x1E0E, 'M', 'ḏ'), - (0x1E0F, 'V'), - (0x1E10, 'M', 'ḑ'), - (0x1E11, 'V'), - (0x1E12, 'M', 'ḓ'), - (0x1E13, 'V'), - (0x1E14, 'M', 'ḕ'), - (0x1E15, 'V'), - (0x1E16, 'M', 'ḗ'), - (0x1E17, 'V'), - (0x1E18, 'M', 'ḙ'), - (0x1E19, 'V'), - (0x1E1A, 'M', 'ḛ'), - (0x1E1B, 'V'), - (0x1E1C, 'M', 'ḝ'), - (0x1E1D, 'V'), - (0x1E1E, 'M', 'ḟ'), - (0x1E1F, 'V'), - (0x1E20, 'M', 'ḡ'), - (0x1E21, 'V'), - (0x1E22, 'M', 'ḣ'), - (0x1E23, 'V'), - ] - -def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E24, 'M', 'ḥ'), - (0x1E25, 'V'), - (0x1E26, 'M', 'ḧ'), - (0x1E27, 'V'), - (0x1E28, 'M', 'ḩ'), - (0x1E29, 'V'), - (0x1E2A, 'M', 'ḫ'), - (0x1E2B, 'V'), - (0x1E2C, 'M', 'ḭ'), - (0x1E2D, 'V'), - (0x1E2E, 'M', 'ḯ'), - (0x1E2F, 'V'), - (0x1E30, 'M', 'ḱ'), - (0x1E31, 'V'), - (0x1E32, 'M', 'ḳ'), - (0x1E33, 'V'), - (0x1E34, 'M', 'ḵ'), - (0x1E35, 'V'), - (0x1E36, 'M', 'ḷ'), - (0x1E37, 'V'), - (0x1E38, 'M', 'ḹ'), - (0x1E39, 'V'), - (0x1E3A, 'M', 'ḻ'), - (0x1E3B, 'V'), - (0x1E3C, 'M', 'ḽ'), - (0x1E3D, 'V'), - (0x1E3E, 'M', 'ḿ'), - (0x1E3F, 'V'), - (0x1E40, 'M', 'ṁ'), - (0x1E41, 'V'), - (0x1E42, 'M', 'ṃ'), - (0x1E43, 'V'), - (0x1E44, 'M', 'ṅ'), - (0x1E45, 'V'), - (0x1E46, 'M', 'ṇ'), - (0x1E47, 'V'), - (0x1E48, 'M', 'ṉ'), - (0x1E49, 'V'), - (0x1E4A, 'M', 'ṋ'), - (0x1E4B, 'V'), - (0x1E4C, 'M', 'ṍ'), - (0x1E4D, 'V'), - (0x1E4E, 'M', 'ṏ'), - (0x1E4F, 'V'), - (0x1E50, 'M', 'ṑ'), - (0x1E51, 'V'), - (0x1E52, 'M', 'ṓ'), - (0x1E53, 'V'), - (0x1E54, 'M', 'ṕ'), - (0x1E55, 'V'), - (0x1E56, 'M', 'ṗ'), - (0x1E57, 'V'), - (0x1E58, 'M', 'ṙ'), - (0x1E59, 'V'), - (0x1E5A, 'M', 'ṛ'), - (0x1E5B, 'V'), - (0x1E5C, 'M', 'ṝ'), - (0x1E5D, 'V'), - (0x1E5E, 'M', 'ṟ'), - (0x1E5F, 'V'), - (0x1E60, 'M', 'ṡ'), - (0x1E61, 'V'), - (0x1E62, 'M', 'ṣ'), - (0x1E63, 'V'), - (0x1E64, 'M', 'ṥ'), - (0x1E65, 'V'), - (0x1E66, 'M', 'ṧ'), - (0x1E67, 'V'), - (0x1E68, 'M', 'ṩ'), - (0x1E69, 'V'), - (0x1E6A, 'M', 'ṫ'), - (0x1E6B, 'V'), - (0x1E6C, 'M', 'ṭ'), - (0x1E6D, 'V'), - (0x1E6E, 'M', 'ṯ'), - (0x1E6F, 'V'), - (0x1E70, 'M', 'ṱ'), - (0x1E71, 'V'), - (0x1E72, 'M', 'ṳ'), - (0x1E73, 'V'), - (0x1E74, 'M', 'ṵ'), - (0x1E75, 'V'), - (0x1E76, 'M', 'ṷ'), - (0x1E77, 'V'), - (0x1E78, 'M', 'ṹ'), - (0x1E79, 'V'), - (0x1E7A, 'M', 'ṻ'), - (0x1E7B, 'V'), - (0x1E7C, 'M', 'ṽ'), - (0x1E7D, 'V'), - (0x1E7E, 'M', 'ṿ'), - (0x1E7F, 'V'), - (0x1E80, 'M', 'ẁ'), - (0x1E81, 'V'), - (0x1E82, 'M', 'ẃ'), - (0x1E83, 'V'), - (0x1E84, 'M', 'ẅ'), - (0x1E85, 'V'), - (0x1E86, 'M', 'ẇ'), - (0x1E87, 'V'), - ] - -def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E88, 'M', 'ẉ'), - (0x1E89, 'V'), - (0x1E8A, 'M', 'ẋ'), - (0x1E8B, 'V'), - (0x1E8C, 'M', 'ẍ'), - (0x1E8D, 'V'), - (0x1E8E, 'M', 'ẏ'), - (0x1E8F, 'V'), - (0x1E90, 'M', 'ẑ'), - (0x1E91, 'V'), - (0x1E92, 'M', 'ẓ'), - (0x1E93, 'V'), - (0x1E94, 'M', 'ẕ'), - (0x1E95, 'V'), - (0x1E9A, 'M', 'aʾ'), - (0x1E9B, 'M', 'ṡ'), - (0x1E9C, 'V'), - (0x1E9E, 'M', 'ss'), - (0x1E9F, 'V'), - (0x1EA0, 'M', 'ạ'), - (0x1EA1, 'V'), - (0x1EA2, 'M', 'ả'), - (0x1EA3, 'V'), - (0x1EA4, 'M', 'ấ'), - (0x1EA5, 'V'), - (0x1EA6, 'M', 
'ầ'), - (0x1EA7, 'V'), - (0x1EA8, 'M', 'ẩ'), - (0x1EA9, 'V'), - (0x1EAA, 'M', 'ẫ'), - (0x1EAB, 'V'), - (0x1EAC, 'M', 'ậ'), - (0x1EAD, 'V'), - (0x1EAE, 'M', 'ắ'), - (0x1EAF, 'V'), - (0x1EB0, 'M', 'ằ'), - (0x1EB1, 'V'), - (0x1EB2, 'M', 'ẳ'), - (0x1EB3, 'V'), - (0x1EB4, 'M', 'ẵ'), - (0x1EB5, 'V'), - (0x1EB6, 'M', 'ặ'), - (0x1EB7, 'V'), - (0x1EB8, 'M', 'ẹ'), - (0x1EB9, 'V'), - (0x1EBA, 'M', 'ẻ'), - (0x1EBB, 'V'), - (0x1EBC, 'M', 'ẽ'), - (0x1EBD, 'V'), - (0x1EBE, 'M', 'ế'), - (0x1EBF, 'V'), - (0x1EC0, 'M', 'ề'), - (0x1EC1, 'V'), - (0x1EC2, 'M', 'ể'), - (0x1EC3, 'V'), - (0x1EC4, 'M', 'ễ'), - (0x1EC5, 'V'), - (0x1EC6, 'M', 'ệ'), - (0x1EC7, 'V'), - (0x1EC8, 'M', 'ỉ'), - (0x1EC9, 'V'), - (0x1ECA, 'M', 'ị'), - (0x1ECB, 'V'), - (0x1ECC, 'M', 'ọ'), - (0x1ECD, 'V'), - (0x1ECE, 'M', 'ỏ'), - (0x1ECF, 'V'), - (0x1ED0, 'M', 'ố'), - (0x1ED1, 'V'), - (0x1ED2, 'M', 'ồ'), - (0x1ED3, 'V'), - (0x1ED4, 'M', 'ổ'), - (0x1ED5, 'V'), - (0x1ED6, 'M', 'ỗ'), - (0x1ED7, 'V'), - (0x1ED8, 'M', 'ộ'), - (0x1ED9, 'V'), - (0x1EDA, 'M', 'ớ'), - (0x1EDB, 'V'), - (0x1EDC, 'M', 'ờ'), - (0x1EDD, 'V'), - (0x1EDE, 'M', 'ở'), - (0x1EDF, 'V'), - (0x1EE0, 'M', 'ỡ'), - (0x1EE1, 'V'), - (0x1EE2, 'M', 'ợ'), - (0x1EE3, 'V'), - (0x1EE4, 'M', 'ụ'), - (0x1EE5, 'V'), - (0x1EE6, 'M', 'ủ'), - (0x1EE7, 'V'), - (0x1EE8, 'M', 'ứ'), - (0x1EE9, 'V'), - (0x1EEA, 'M', 'ừ'), - (0x1EEB, 'V'), - (0x1EEC, 'M', 'ử'), - (0x1EED, 'V'), - (0x1EEE, 'M', 'ữ'), - (0x1EEF, 'V'), - (0x1EF0, 'M', 'ự'), - ] - -def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EF1, 'V'), - (0x1EF2, 'M', 'ỳ'), - (0x1EF3, 'V'), - (0x1EF4, 'M', 'ỵ'), - (0x1EF5, 'V'), - (0x1EF6, 'M', 'ỷ'), - (0x1EF7, 'V'), - (0x1EF8, 'M', 'ỹ'), - (0x1EF9, 'V'), - (0x1EFA, 'M', 'ỻ'), - (0x1EFB, 'V'), - (0x1EFC, 'M', 'ỽ'), - (0x1EFD, 'V'), - (0x1EFE, 'M', 'ỿ'), - (0x1EFF, 'V'), - (0x1F08, 'M', 'ἀ'), - (0x1F09, 'M', 'ἁ'), - (0x1F0A, 'M', 'ἂ'), - (0x1F0B, 'M', 'ἃ'), - (0x1F0C, 'M', 'ἄ'), - (0x1F0D, 'M', 'ἅ'), - (0x1F0E, 'M', 'ἆ'), - (0x1F0F, 'M', 'ἇ'), - (0x1F10, 'V'), - (0x1F16, 'X'), - (0x1F18, 'M', 'ἐ'), - (0x1F19, 'M', 'ἑ'), - (0x1F1A, 'M', 'ἒ'), - (0x1F1B, 'M', 'ἓ'), - (0x1F1C, 'M', 'ἔ'), - (0x1F1D, 'M', 'ἕ'), - (0x1F1E, 'X'), - (0x1F20, 'V'), - (0x1F28, 'M', 'ἠ'), - (0x1F29, 'M', 'ἡ'), - (0x1F2A, 'M', 'ἢ'), - (0x1F2B, 'M', 'ἣ'), - (0x1F2C, 'M', 'ἤ'), - (0x1F2D, 'M', 'ἥ'), - (0x1F2E, 'M', 'ἦ'), - (0x1F2F, 'M', 'ἧ'), - (0x1F30, 'V'), - (0x1F38, 'M', 'ἰ'), - (0x1F39, 'M', 'ἱ'), - (0x1F3A, 'M', 'ἲ'), - (0x1F3B, 'M', 'ἳ'), - (0x1F3C, 'M', 'ἴ'), - (0x1F3D, 'M', 'ἵ'), - (0x1F3E, 'M', 'ἶ'), - (0x1F3F, 'M', 'ἷ'), - (0x1F40, 'V'), - (0x1F46, 'X'), - (0x1F48, 'M', 'ὀ'), - (0x1F49, 'M', 'ὁ'), - (0x1F4A, 'M', 'ὂ'), - (0x1F4B, 'M', 'ὃ'), - (0x1F4C, 'M', 'ὄ'), - (0x1F4D, 'M', 'ὅ'), - (0x1F4E, 'X'), - (0x1F50, 'V'), - (0x1F58, 'X'), - (0x1F59, 'M', 'ὑ'), - (0x1F5A, 'X'), - (0x1F5B, 'M', 'ὓ'), - (0x1F5C, 'X'), - (0x1F5D, 'M', 'ὕ'), - (0x1F5E, 'X'), - (0x1F5F, 'M', 'ὗ'), - (0x1F60, 'V'), - (0x1F68, 'M', 'ὠ'), - (0x1F69, 'M', 'ὡ'), - (0x1F6A, 'M', 'ὢ'), - (0x1F6B, 'M', 'ὣ'), - (0x1F6C, 'M', 'ὤ'), - (0x1F6D, 'M', 'ὥ'), - (0x1F6E, 'M', 'ὦ'), - (0x1F6F, 'M', 'ὧ'), - (0x1F70, 'V'), - (0x1F71, 'M', 'ά'), - (0x1F72, 'V'), - (0x1F73, 'M', 'έ'), - (0x1F74, 'V'), - (0x1F75, 'M', 'ή'), - (0x1F76, 'V'), - (0x1F77, 'M', 'ί'), - (0x1F78, 'V'), - (0x1F79, 'M', 'ό'), - (0x1F7A, 'V'), - (0x1F7B, 'M', 'ύ'), - (0x1F7C, 'V'), - (0x1F7D, 'M', 'ώ'), - (0x1F7E, 'X'), - (0x1F80, 'M', 'ἀι'), - (0x1F81, 'M', 'ἁι'), - (0x1F82, 'M', 'ἂι'), - (0x1F83, 'M', 'ἃι'), - (0x1F84, 'M', 'ἄι'), - (0x1F85, 'M', 'ἅι'), - 
(0x1F86, 'M', 'ἆι'), - (0x1F87, 'M', 'ἇι'), - ] - -def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F88, 'M', 'ἀι'), - (0x1F89, 'M', 'ἁι'), - (0x1F8A, 'M', 'ἂι'), - (0x1F8B, 'M', 'ἃι'), - (0x1F8C, 'M', 'ἄι'), - (0x1F8D, 'M', 'ἅι'), - (0x1F8E, 'M', 'ἆι'), - (0x1F8F, 'M', 'ἇι'), - (0x1F90, 'M', 'ἠι'), - (0x1F91, 'M', 'ἡι'), - (0x1F92, 'M', 'ἢι'), - (0x1F93, 'M', 'ἣι'), - (0x1F94, 'M', 'ἤι'), - (0x1F95, 'M', 'ἥι'), - (0x1F96, 'M', 'ἦι'), - (0x1F97, 'M', 'ἧι'), - (0x1F98, 'M', 'ἠι'), - (0x1F99, 'M', 'ἡι'), - (0x1F9A, 'M', 'ἢι'), - (0x1F9B, 'M', 'ἣι'), - (0x1F9C, 'M', 'ἤι'), - (0x1F9D, 'M', 'ἥι'), - (0x1F9E, 'M', 'ἦι'), - (0x1F9F, 'M', 'ἧι'), - (0x1FA0, 'M', 'ὠι'), - (0x1FA1, 'M', 'ὡι'), - (0x1FA2, 'M', 'ὢι'), - (0x1FA3, 'M', 'ὣι'), - (0x1FA4, 'M', 'ὤι'), - (0x1FA5, 'M', 'ὥι'), - (0x1FA6, 'M', 'ὦι'), - (0x1FA7, 'M', 'ὧι'), - (0x1FA8, 'M', 'ὠι'), - (0x1FA9, 'M', 'ὡι'), - (0x1FAA, 'M', 'ὢι'), - (0x1FAB, 'M', 'ὣι'), - (0x1FAC, 'M', 'ὤι'), - (0x1FAD, 'M', 'ὥι'), - (0x1FAE, 'M', 'ὦι'), - (0x1FAF, 'M', 'ὧι'), - (0x1FB0, 'V'), - (0x1FB2, 'M', 'ὰι'), - (0x1FB3, 'M', 'αι'), - (0x1FB4, 'M', 'άι'), - (0x1FB5, 'X'), - (0x1FB6, 'V'), - (0x1FB7, 'M', 'ᾶι'), - (0x1FB8, 'M', 'ᾰ'), - (0x1FB9, 'M', 'ᾱ'), - (0x1FBA, 'M', 'ὰ'), - (0x1FBB, 'M', 'ά'), - (0x1FBC, 'M', 'αι'), - (0x1FBD, '3', ' ̓'), - (0x1FBE, 'M', 'ι'), - (0x1FBF, '3', ' ̓'), - (0x1FC0, '3', ' ͂'), - (0x1FC1, '3', ' ̈͂'), - (0x1FC2, 'M', 'ὴι'), - (0x1FC3, 'M', 'ηι'), - (0x1FC4, 'M', 'ήι'), - (0x1FC5, 'X'), - (0x1FC6, 'V'), - (0x1FC7, 'M', 'ῆι'), - (0x1FC8, 'M', 'ὲ'), - (0x1FC9, 'M', 'έ'), - (0x1FCA, 'M', 'ὴ'), - (0x1FCB, 'M', 'ή'), - (0x1FCC, 'M', 'ηι'), - (0x1FCD, '3', ' ̓̀'), - (0x1FCE, '3', ' ̓́'), - (0x1FCF, '3', ' ̓͂'), - (0x1FD0, 'V'), - (0x1FD3, 'M', 'ΐ'), - (0x1FD4, 'X'), - (0x1FD6, 'V'), - (0x1FD8, 'M', 'ῐ'), - (0x1FD9, 'M', 'ῑ'), - (0x1FDA, 'M', 'ὶ'), - (0x1FDB, 'M', 'ί'), - (0x1FDC, 'X'), - (0x1FDD, '3', ' ̔̀'), - (0x1FDE, '3', ' ̔́'), - (0x1FDF, '3', ' ̔͂'), - (0x1FE0, 'V'), - (0x1FE3, 'M', 'ΰ'), - (0x1FE4, 'V'), - (0x1FE8, 'M', 'ῠ'), - (0x1FE9, 'M', 'ῡ'), - (0x1FEA, 'M', 'ὺ'), - (0x1FEB, 'M', 'ύ'), - (0x1FEC, 'M', 'ῥ'), - (0x1FED, '3', ' ̈̀'), - (0x1FEE, '3', ' ̈́'), - (0x1FEF, '3', '`'), - (0x1FF0, 'X'), - (0x1FF2, 'M', 'ὼι'), - (0x1FF3, 'M', 'ωι'), - (0x1FF4, 'M', 'ώι'), - (0x1FF5, 'X'), - (0x1FF6, 'V'), - ] - -def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FF7, 'M', 'ῶι'), - (0x1FF8, 'M', 'ὸ'), - (0x1FF9, 'M', 'ό'), - (0x1FFA, 'M', 'ὼ'), - (0x1FFB, 'M', 'ώ'), - (0x1FFC, 'M', 'ωι'), - (0x1FFD, '3', ' ́'), - (0x1FFE, '3', ' ̔'), - (0x1FFF, 'X'), - (0x2000, '3', ' '), - (0x200B, 'I'), - (0x200C, 'D', ''), - (0x200E, 'X'), - (0x2010, 'V'), - (0x2011, 'M', '‐'), - (0x2012, 'V'), - (0x2017, '3', ' ̳'), - (0x2018, 'V'), - (0x2024, 'X'), - (0x2027, 'V'), - (0x2028, 'X'), - (0x202F, '3', ' '), - (0x2030, 'V'), - (0x2033, 'M', '′′'), - (0x2034, 'M', '′′′'), - (0x2035, 'V'), - (0x2036, 'M', '‵‵'), - (0x2037, 'M', '‵‵‵'), - (0x2038, 'V'), - (0x203C, '3', '!!'), - (0x203D, 'V'), - (0x203E, '3', ' ̅'), - (0x203F, 'V'), - (0x2047, '3', '??'), - (0x2048, '3', '?!'), - (0x2049, '3', '!?'), - (0x204A, 'V'), - (0x2057, 'M', '′′′′'), - (0x2058, 'V'), - (0x205F, '3', ' '), - (0x2060, 'I'), - (0x2061, 'X'), - (0x2064, 'I'), - (0x2065, 'X'), - (0x2070, 'M', '0'), - (0x2071, 'M', 'i'), - (0x2072, 'X'), - (0x2074, 'M', '4'), - (0x2075, 'M', '5'), - (0x2076, 'M', '6'), - (0x2077, 'M', '7'), - (0x2078, 'M', '8'), - (0x2079, 'M', '9'), - (0x207A, '3', '+'), - (0x207B, 'M', '−'), - 
(0x207C, '3', '='), - (0x207D, '3', '('), - (0x207E, '3', ')'), - (0x207F, 'M', 'n'), - (0x2080, 'M', '0'), - (0x2081, 'M', '1'), - (0x2082, 'M', '2'), - (0x2083, 'M', '3'), - (0x2084, 'M', '4'), - (0x2085, 'M', '5'), - (0x2086, 'M', '6'), - (0x2087, 'M', '7'), - (0x2088, 'M', '8'), - (0x2089, 'M', '9'), - (0x208A, '3', '+'), - (0x208B, 'M', '−'), - (0x208C, '3', '='), - (0x208D, '3', '('), - (0x208E, '3', ')'), - (0x208F, 'X'), - (0x2090, 'M', 'a'), - (0x2091, 'M', 'e'), - (0x2092, 'M', 'o'), - (0x2093, 'M', 'x'), - (0x2094, 'M', 'ə'), - (0x2095, 'M', 'h'), - (0x2096, 'M', 'k'), - (0x2097, 'M', 'l'), - (0x2098, 'M', 'm'), - (0x2099, 'M', 'n'), - (0x209A, 'M', 'p'), - (0x209B, 'M', 's'), - (0x209C, 'M', 't'), - (0x209D, 'X'), - (0x20A0, 'V'), - (0x20A8, 'M', 'rs'), - (0x20A9, 'V'), - (0x20C1, 'X'), - (0x20D0, 'V'), - (0x20F1, 'X'), - (0x2100, '3', 'a/c'), - (0x2101, '3', 'a/s'), - (0x2102, 'M', 'c'), - (0x2103, 'M', '°c'), - (0x2104, 'V'), - ] - -def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2105, '3', 'c/o'), - (0x2106, '3', 'c/u'), - (0x2107, 'M', 'ɛ'), - (0x2108, 'V'), - (0x2109, 'M', '°f'), - (0x210A, 'M', 'g'), - (0x210B, 'M', 'h'), - (0x210F, 'M', 'ħ'), - (0x2110, 'M', 'i'), - (0x2112, 'M', 'l'), - (0x2114, 'V'), - (0x2115, 'M', 'n'), - (0x2116, 'M', 'no'), - (0x2117, 'V'), - (0x2119, 'M', 'p'), - (0x211A, 'M', 'q'), - (0x211B, 'M', 'r'), - (0x211E, 'V'), - (0x2120, 'M', 'sm'), - (0x2121, 'M', 'tel'), - (0x2122, 'M', 'tm'), - (0x2123, 'V'), - (0x2124, 'M', 'z'), - (0x2125, 'V'), - (0x2126, 'M', 'ω'), - (0x2127, 'V'), - (0x2128, 'M', 'z'), - (0x2129, 'V'), - (0x212A, 'M', 'k'), - (0x212B, 'M', 'å'), - (0x212C, 'M', 'b'), - (0x212D, 'M', 'c'), - (0x212E, 'V'), - (0x212F, 'M', 'e'), - (0x2131, 'M', 'f'), - (0x2132, 'X'), - (0x2133, 'M', 'm'), - (0x2134, 'M', 'o'), - (0x2135, 'M', 'א'), - (0x2136, 'M', 'ב'), - (0x2137, 'M', 'ג'), - (0x2138, 'M', 'ד'), - (0x2139, 'M', 'i'), - (0x213A, 'V'), - (0x213B, 'M', 'fax'), - (0x213C, 'M', 'π'), - (0x213D, 'M', 'γ'), - (0x213F, 'M', 'π'), - (0x2140, 'M', '∑'), - (0x2141, 'V'), - (0x2145, 'M', 'd'), - (0x2147, 'M', 'e'), - (0x2148, 'M', 'i'), - (0x2149, 'M', 'j'), - (0x214A, 'V'), - (0x2150, 'M', '1⁄7'), - (0x2151, 'M', '1⁄9'), - (0x2152, 'M', '1⁄10'), - (0x2153, 'M', '1⁄3'), - (0x2154, 'M', '2⁄3'), - (0x2155, 'M', '1⁄5'), - (0x2156, 'M', '2⁄5'), - (0x2157, 'M', '3⁄5'), - (0x2158, 'M', '4⁄5'), - (0x2159, 'M', '1⁄6'), - (0x215A, 'M', '5⁄6'), - (0x215B, 'M', '1⁄8'), - (0x215C, 'M', '3⁄8'), - (0x215D, 'M', '5⁄8'), - (0x215E, 'M', '7⁄8'), - (0x215F, 'M', '1⁄'), - (0x2160, 'M', 'i'), - (0x2161, 'M', 'ii'), - (0x2162, 'M', 'iii'), - (0x2163, 'M', 'iv'), - (0x2164, 'M', 'v'), - (0x2165, 'M', 'vi'), - (0x2166, 'M', 'vii'), - (0x2167, 'M', 'viii'), - (0x2168, 'M', 'ix'), - (0x2169, 'M', 'x'), - (0x216A, 'M', 'xi'), - (0x216B, 'M', 'xii'), - (0x216C, 'M', 'l'), - (0x216D, 'M', 'c'), - (0x216E, 'M', 'd'), - (0x216F, 'M', 'm'), - (0x2170, 'M', 'i'), - (0x2171, 'M', 'ii'), - (0x2172, 'M', 'iii'), - (0x2173, 'M', 'iv'), - (0x2174, 'M', 'v'), - (0x2175, 'M', 'vi'), - (0x2176, 'M', 'vii'), - (0x2177, 'M', 'viii'), - (0x2178, 'M', 'ix'), - (0x2179, 'M', 'x'), - (0x217A, 'M', 'xi'), - (0x217B, 'M', 'xii'), - (0x217C, 'M', 'l'), - ] - -def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x217D, 'M', 'c'), - (0x217E, 'M', 'd'), - (0x217F, 'M', 'm'), - (0x2180, 'V'), - (0x2183, 'X'), - (0x2184, 'V'), - (0x2189, 'M', '0⁄3'), - (0x218A, 'V'), - (0x218C, 'X'), - (0x2190, 'V'), - (0x222C, 'M', '∫∫'), - 
(0x222D, 'M', '∫∫∫'), - (0x222E, 'V'), - (0x222F, 'M', '∮∮'), - (0x2230, 'M', '∮∮∮'), - (0x2231, 'V'), - (0x2260, '3'), - (0x2261, 'V'), - (0x226E, '3'), - (0x2270, 'V'), - (0x2329, 'M', '〈'), - (0x232A, 'M', '〉'), - (0x232B, 'V'), - (0x2427, 'X'), - (0x2440, 'V'), - (0x244B, 'X'), - (0x2460, 'M', '1'), - (0x2461, 'M', '2'), - (0x2462, 'M', '3'), - (0x2463, 'M', '4'), - (0x2464, 'M', '5'), - (0x2465, 'M', '6'), - (0x2466, 'M', '7'), - (0x2467, 'M', '8'), - (0x2468, 'M', '9'), - (0x2469, 'M', '10'), - (0x246A, 'M', '11'), - (0x246B, 'M', '12'), - (0x246C, 'M', '13'), - (0x246D, 'M', '14'), - (0x246E, 'M', '15'), - (0x246F, 'M', '16'), - (0x2470, 'M', '17'), - (0x2471, 'M', '18'), - (0x2472, 'M', '19'), - (0x2473, 'M', '20'), - (0x2474, '3', '(1)'), - (0x2475, '3', '(2)'), - (0x2476, '3', '(3)'), - (0x2477, '3', '(4)'), - (0x2478, '3', '(5)'), - (0x2479, '3', '(6)'), - (0x247A, '3', '(7)'), - (0x247B, '3', '(8)'), - (0x247C, '3', '(9)'), - (0x247D, '3', '(10)'), - (0x247E, '3', '(11)'), - (0x247F, '3', '(12)'), - (0x2480, '3', '(13)'), - (0x2481, '3', '(14)'), - (0x2482, '3', '(15)'), - (0x2483, '3', '(16)'), - (0x2484, '3', '(17)'), - (0x2485, '3', '(18)'), - (0x2486, '3', '(19)'), - (0x2487, '3', '(20)'), - (0x2488, 'X'), - (0x249C, '3', '(a)'), - (0x249D, '3', '(b)'), - (0x249E, '3', '(c)'), - (0x249F, '3', '(d)'), - (0x24A0, '3', '(e)'), - (0x24A1, '3', '(f)'), - (0x24A2, '3', '(g)'), - (0x24A3, '3', '(h)'), - (0x24A4, '3', '(i)'), - (0x24A5, '3', '(j)'), - (0x24A6, '3', '(k)'), - (0x24A7, '3', '(l)'), - (0x24A8, '3', '(m)'), - (0x24A9, '3', '(n)'), - (0x24AA, '3', '(o)'), - (0x24AB, '3', '(p)'), - (0x24AC, '3', '(q)'), - (0x24AD, '3', '(r)'), - (0x24AE, '3', '(s)'), - (0x24AF, '3', '(t)'), - (0x24B0, '3', '(u)'), - (0x24B1, '3', '(v)'), - (0x24B2, '3', '(w)'), - (0x24B3, '3', '(x)'), - (0x24B4, '3', '(y)'), - (0x24B5, '3', '(z)'), - (0x24B6, 'M', 'a'), - (0x24B7, 'M', 'b'), - (0x24B8, 'M', 'c'), - (0x24B9, 'M', 'd'), - (0x24BA, 'M', 'e'), - (0x24BB, 'M', 'f'), - (0x24BC, 'M', 'g'), - ] - -def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x24BD, 'M', 'h'), - (0x24BE, 'M', 'i'), - (0x24BF, 'M', 'j'), - (0x24C0, 'M', 'k'), - (0x24C1, 'M', 'l'), - (0x24C2, 'M', 'm'), - (0x24C3, 'M', 'n'), - (0x24C4, 'M', 'o'), - (0x24C5, 'M', 'p'), - (0x24C6, 'M', 'q'), - (0x24C7, 'M', 'r'), - (0x24C8, 'M', 's'), - (0x24C9, 'M', 't'), - (0x24CA, 'M', 'u'), - (0x24CB, 'M', 'v'), - (0x24CC, 'M', 'w'), - (0x24CD, 'M', 'x'), - (0x24CE, 'M', 'y'), - (0x24CF, 'M', 'z'), - (0x24D0, 'M', 'a'), - (0x24D1, 'M', 'b'), - (0x24D2, 'M', 'c'), - (0x24D3, 'M', 'd'), - (0x24D4, 'M', 'e'), - (0x24D5, 'M', 'f'), - (0x24D6, 'M', 'g'), - (0x24D7, 'M', 'h'), - (0x24D8, 'M', 'i'), - (0x24D9, 'M', 'j'), - (0x24DA, 'M', 'k'), - (0x24DB, 'M', 'l'), - (0x24DC, 'M', 'm'), - (0x24DD, 'M', 'n'), - (0x24DE, 'M', 'o'), - (0x24DF, 'M', 'p'), - (0x24E0, 'M', 'q'), - (0x24E1, 'M', 'r'), - (0x24E2, 'M', 's'), - (0x24E3, 'M', 't'), - (0x24E4, 'M', 'u'), - (0x24E5, 'M', 'v'), - (0x24E6, 'M', 'w'), - (0x24E7, 'M', 'x'), - (0x24E8, 'M', 'y'), - (0x24E9, 'M', 'z'), - (0x24EA, 'M', '0'), - (0x24EB, 'V'), - (0x2A0C, 'M', '∫∫∫∫'), - (0x2A0D, 'V'), - (0x2A74, '3', '::='), - (0x2A75, '3', '=='), - (0x2A76, '3', '==='), - (0x2A77, 'V'), - (0x2ADC, 'M', '⫝̸'), - (0x2ADD, 'V'), - (0x2B74, 'X'), - (0x2B76, 'V'), - (0x2B96, 'X'), - (0x2B97, 'V'), - (0x2C00, 'M', 'ⰰ'), - (0x2C01, 'M', 'ⰱ'), - (0x2C02, 'M', 'ⰲ'), - (0x2C03, 'M', 'ⰳ'), - (0x2C04, 'M', 'ⰴ'), - (0x2C05, 'M', 'ⰵ'), - (0x2C06, 'M', 'ⰶ'), - (0x2C07, 'M', 'ⰷ'), - 
(0x2C08, 'M', 'ⰸ'), - (0x2C09, 'M', 'ⰹ'), - (0x2C0A, 'M', 'ⰺ'), - (0x2C0B, 'M', 'ⰻ'), - (0x2C0C, 'M', 'ⰼ'), - (0x2C0D, 'M', 'ⰽ'), - (0x2C0E, 'M', 'ⰾ'), - (0x2C0F, 'M', 'ⰿ'), - (0x2C10, 'M', 'ⱀ'), - (0x2C11, 'M', 'ⱁ'), - (0x2C12, 'M', 'ⱂ'), - (0x2C13, 'M', 'ⱃ'), - (0x2C14, 'M', 'ⱄ'), - (0x2C15, 'M', 'ⱅ'), - (0x2C16, 'M', 'ⱆ'), - (0x2C17, 'M', 'ⱇ'), - (0x2C18, 'M', 'ⱈ'), - (0x2C19, 'M', 'ⱉ'), - (0x2C1A, 'M', 'ⱊ'), - (0x2C1B, 'M', 'ⱋ'), - (0x2C1C, 'M', 'ⱌ'), - (0x2C1D, 'M', 'ⱍ'), - (0x2C1E, 'M', 'ⱎ'), - (0x2C1F, 'M', 'ⱏ'), - (0x2C20, 'M', 'ⱐ'), - (0x2C21, 'M', 'ⱑ'), - (0x2C22, 'M', 'ⱒ'), - (0x2C23, 'M', 'ⱓ'), - (0x2C24, 'M', 'ⱔ'), - (0x2C25, 'M', 'ⱕ'), - (0x2C26, 'M', 'ⱖ'), - (0x2C27, 'M', 'ⱗ'), - (0x2C28, 'M', 'ⱘ'), - ] - -def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2C29, 'M', 'ⱙ'), - (0x2C2A, 'M', 'ⱚ'), - (0x2C2B, 'M', 'ⱛ'), - (0x2C2C, 'M', 'ⱜ'), - (0x2C2D, 'M', 'ⱝ'), - (0x2C2E, 'M', 'ⱞ'), - (0x2C2F, 'M', 'ⱟ'), - (0x2C30, 'V'), - (0x2C60, 'M', 'ⱡ'), - (0x2C61, 'V'), - (0x2C62, 'M', 'ɫ'), - (0x2C63, 'M', 'ᵽ'), - (0x2C64, 'M', 'ɽ'), - (0x2C65, 'V'), - (0x2C67, 'M', 'ⱨ'), - (0x2C68, 'V'), - (0x2C69, 'M', 'ⱪ'), - (0x2C6A, 'V'), - (0x2C6B, 'M', 'ⱬ'), - (0x2C6C, 'V'), - (0x2C6D, 'M', 'ɑ'), - (0x2C6E, 'M', 'ɱ'), - (0x2C6F, 'M', 'ɐ'), - (0x2C70, 'M', 'ɒ'), - (0x2C71, 'V'), - (0x2C72, 'M', 'ⱳ'), - (0x2C73, 'V'), - (0x2C75, 'M', 'ⱶ'), - (0x2C76, 'V'), - (0x2C7C, 'M', 'j'), - (0x2C7D, 'M', 'v'), - (0x2C7E, 'M', 'ȿ'), - (0x2C7F, 'M', 'ɀ'), - (0x2C80, 'M', 'ⲁ'), - (0x2C81, 'V'), - (0x2C82, 'M', 'ⲃ'), - (0x2C83, 'V'), - (0x2C84, 'M', 'ⲅ'), - (0x2C85, 'V'), - (0x2C86, 'M', 'ⲇ'), - (0x2C87, 'V'), - (0x2C88, 'M', 'ⲉ'), - (0x2C89, 'V'), - (0x2C8A, 'M', 'ⲋ'), - (0x2C8B, 'V'), - (0x2C8C, 'M', 'ⲍ'), - (0x2C8D, 'V'), - (0x2C8E, 'M', 'ⲏ'), - (0x2C8F, 'V'), - (0x2C90, 'M', 'ⲑ'), - (0x2C91, 'V'), - (0x2C92, 'M', 'ⲓ'), - (0x2C93, 'V'), - (0x2C94, 'M', 'ⲕ'), - (0x2C95, 'V'), - (0x2C96, 'M', 'ⲗ'), - (0x2C97, 'V'), - (0x2C98, 'M', 'ⲙ'), - (0x2C99, 'V'), - (0x2C9A, 'M', 'ⲛ'), - (0x2C9B, 'V'), - (0x2C9C, 'M', 'ⲝ'), - (0x2C9D, 'V'), - (0x2C9E, 'M', 'ⲟ'), - (0x2C9F, 'V'), - (0x2CA0, 'M', 'ⲡ'), - (0x2CA1, 'V'), - (0x2CA2, 'M', 'ⲣ'), - (0x2CA3, 'V'), - (0x2CA4, 'M', 'ⲥ'), - (0x2CA5, 'V'), - (0x2CA6, 'M', 'ⲧ'), - (0x2CA7, 'V'), - (0x2CA8, 'M', 'ⲩ'), - (0x2CA9, 'V'), - (0x2CAA, 'M', 'ⲫ'), - (0x2CAB, 'V'), - (0x2CAC, 'M', 'ⲭ'), - (0x2CAD, 'V'), - (0x2CAE, 'M', 'ⲯ'), - (0x2CAF, 'V'), - (0x2CB0, 'M', 'ⲱ'), - (0x2CB1, 'V'), - (0x2CB2, 'M', 'ⲳ'), - (0x2CB3, 'V'), - (0x2CB4, 'M', 'ⲵ'), - (0x2CB5, 'V'), - (0x2CB6, 'M', 'ⲷ'), - (0x2CB7, 'V'), - (0x2CB8, 'M', 'ⲹ'), - (0x2CB9, 'V'), - (0x2CBA, 'M', 'ⲻ'), - (0x2CBB, 'V'), - (0x2CBC, 'M', 'ⲽ'), - (0x2CBD, 'V'), - (0x2CBE, 'M', 'ⲿ'), - (0x2CBF, 'V'), - (0x2CC0, 'M', 'ⳁ'), - (0x2CC1, 'V'), - (0x2CC2, 'M', 'ⳃ'), - ] - -def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2CC3, 'V'), - (0x2CC4, 'M', 'ⳅ'), - (0x2CC5, 'V'), - (0x2CC6, 'M', 'ⳇ'), - (0x2CC7, 'V'), - (0x2CC8, 'M', 'ⳉ'), - (0x2CC9, 'V'), - (0x2CCA, 'M', 'ⳋ'), - (0x2CCB, 'V'), - (0x2CCC, 'M', 'ⳍ'), - (0x2CCD, 'V'), - (0x2CCE, 'M', 'ⳏ'), - (0x2CCF, 'V'), - (0x2CD0, 'M', 'ⳑ'), - (0x2CD1, 'V'), - (0x2CD2, 'M', 'ⳓ'), - (0x2CD3, 'V'), - (0x2CD4, 'M', 'ⳕ'), - (0x2CD5, 'V'), - (0x2CD6, 'M', 'ⳗ'), - (0x2CD7, 'V'), - (0x2CD8, 'M', 'ⳙ'), - (0x2CD9, 'V'), - (0x2CDA, 'M', 'ⳛ'), - (0x2CDB, 'V'), - (0x2CDC, 'M', 'ⳝ'), - (0x2CDD, 'V'), - (0x2CDE, 'M', 'ⳟ'), - (0x2CDF, 'V'), - (0x2CE0, 'M', 'ⳡ'), - (0x2CE1, 'V'), - (0x2CE2, 'M', 'ⳣ'), - (0x2CE3, 'V'), - (0x2CEB, 'M', 'ⳬ'), - 
(0x2CEC, 'V'), - (0x2CED, 'M', 'ⳮ'), - (0x2CEE, 'V'), - (0x2CF2, 'M', 'ⳳ'), - (0x2CF3, 'V'), - (0x2CF4, 'X'), - (0x2CF9, 'V'), - (0x2D26, 'X'), - (0x2D27, 'V'), - (0x2D28, 'X'), - (0x2D2D, 'V'), - (0x2D2E, 'X'), - (0x2D30, 'V'), - (0x2D68, 'X'), - (0x2D6F, 'M', 'ⵡ'), - (0x2D70, 'V'), - (0x2D71, 'X'), - (0x2D7F, 'V'), - (0x2D97, 'X'), - (0x2DA0, 'V'), - (0x2DA7, 'X'), - (0x2DA8, 'V'), - (0x2DAF, 'X'), - (0x2DB0, 'V'), - (0x2DB7, 'X'), - (0x2DB8, 'V'), - (0x2DBF, 'X'), - (0x2DC0, 'V'), - (0x2DC7, 'X'), - (0x2DC8, 'V'), - (0x2DCF, 'X'), - (0x2DD0, 'V'), - (0x2DD7, 'X'), - (0x2DD8, 'V'), - (0x2DDF, 'X'), - (0x2DE0, 'V'), - (0x2E5E, 'X'), - (0x2E80, 'V'), - (0x2E9A, 'X'), - (0x2E9B, 'V'), - (0x2E9F, 'M', '母'), - (0x2EA0, 'V'), - (0x2EF3, 'M', '龟'), - (0x2EF4, 'X'), - (0x2F00, 'M', '一'), - (0x2F01, 'M', '丨'), - (0x2F02, 'M', '丶'), - (0x2F03, 'M', '丿'), - (0x2F04, 'M', '乙'), - (0x2F05, 'M', '亅'), - (0x2F06, 'M', '二'), - (0x2F07, 'M', '亠'), - (0x2F08, 'M', '人'), - (0x2F09, 'M', '儿'), - (0x2F0A, 'M', '入'), - (0x2F0B, 'M', '八'), - (0x2F0C, 'M', '冂'), - (0x2F0D, 'M', '冖'), - (0x2F0E, 'M', '冫'), - (0x2F0F, 'M', '几'), - (0x2F10, 'M', '凵'), - (0x2F11, 'M', '刀'), - (0x2F12, 'M', '力'), - (0x2F13, 'M', '勹'), - (0x2F14, 'M', '匕'), - (0x2F15, 'M', '匚'), - ] - -def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F16, 'M', '匸'), - (0x2F17, 'M', '十'), - (0x2F18, 'M', '卜'), - (0x2F19, 'M', '卩'), - (0x2F1A, 'M', '厂'), - (0x2F1B, 'M', '厶'), - (0x2F1C, 'M', '又'), - (0x2F1D, 'M', '口'), - (0x2F1E, 'M', '囗'), - (0x2F1F, 'M', '土'), - (0x2F20, 'M', '士'), - (0x2F21, 'M', '夂'), - (0x2F22, 'M', '夊'), - (0x2F23, 'M', '夕'), - (0x2F24, 'M', '大'), - (0x2F25, 'M', '女'), - (0x2F26, 'M', '子'), - (0x2F27, 'M', '宀'), - (0x2F28, 'M', '寸'), - (0x2F29, 'M', '小'), - (0x2F2A, 'M', '尢'), - (0x2F2B, 'M', '尸'), - (0x2F2C, 'M', '屮'), - (0x2F2D, 'M', '山'), - (0x2F2E, 'M', '巛'), - (0x2F2F, 'M', '工'), - (0x2F30, 'M', '己'), - (0x2F31, 'M', '巾'), - (0x2F32, 'M', '干'), - (0x2F33, 'M', '幺'), - (0x2F34, 'M', '广'), - (0x2F35, 'M', '廴'), - (0x2F36, 'M', '廾'), - (0x2F37, 'M', '弋'), - (0x2F38, 'M', '弓'), - (0x2F39, 'M', '彐'), - (0x2F3A, 'M', '彡'), - (0x2F3B, 'M', '彳'), - (0x2F3C, 'M', '心'), - (0x2F3D, 'M', '戈'), - (0x2F3E, 'M', '戶'), - (0x2F3F, 'M', '手'), - (0x2F40, 'M', '支'), - (0x2F41, 'M', '攴'), - (0x2F42, 'M', '文'), - (0x2F43, 'M', '斗'), - (0x2F44, 'M', '斤'), - (0x2F45, 'M', '方'), - (0x2F46, 'M', '无'), - (0x2F47, 'M', '日'), - (0x2F48, 'M', '曰'), - (0x2F49, 'M', '月'), - (0x2F4A, 'M', '木'), - (0x2F4B, 'M', '欠'), - (0x2F4C, 'M', '止'), - (0x2F4D, 'M', '歹'), - (0x2F4E, 'M', '殳'), - (0x2F4F, 'M', '毋'), - (0x2F50, 'M', '比'), - (0x2F51, 'M', '毛'), - (0x2F52, 'M', '氏'), - (0x2F53, 'M', '气'), - (0x2F54, 'M', '水'), - (0x2F55, 'M', '火'), - (0x2F56, 'M', '爪'), - (0x2F57, 'M', '父'), - (0x2F58, 'M', '爻'), - (0x2F59, 'M', '爿'), - (0x2F5A, 'M', '片'), - (0x2F5B, 'M', '牙'), - (0x2F5C, 'M', '牛'), - (0x2F5D, 'M', '犬'), - (0x2F5E, 'M', '玄'), - (0x2F5F, 'M', '玉'), - (0x2F60, 'M', '瓜'), - (0x2F61, 'M', '瓦'), - (0x2F62, 'M', '甘'), - (0x2F63, 'M', '生'), - (0x2F64, 'M', '用'), - (0x2F65, 'M', '田'), - (0x2F66, 'M', '疋'), - (0x2F67, 'M', '疒'), - (0x2F68, 'M', '癶'), - (0x2F69, 'M', '白'), - (0x2F6A, 'M', '皮'), - (0x2F6B, 'M', '皿'), - (0x2F6C, 'M', '目'), - (0x2F6D, 'M', '矛'), - (0x2F6E, 'M', '矢'), - (0x2F6F, 'M', '石'), - (0x2F70, 'M', '示'), - (0x2F71, 'M', '禸'), - (0x2F72, 'M', '禾'), - (0x2F73, 'M', '穴'), - (0x2F74, 'M', '立'), - (0x2F75, 'M', '竹'), - (0x2F76, 'M', '米'), - (0x2F77, 'M', '糸'), - (0x2F78, 'M', '缶'), - (0x2F79, 'M', '网'), - ] - -def 
_seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F7A, 'M', '羊'), - (0x2F7B, 'M', '羽'), - (0x2F7C, 'M', '老'), - (0x2F7D, 'M', '而'), - (0x2F7E, 'M', '耒'), - (0x2F7F, 'M', '耳'), - (0x2F80, 'M', '聿'), - (0x2F81, 'M', '肉'), - (0x2F82, 'M', '臣'), - (0x2F83, 'M', '自'), - (0x2F84, 'M', '至'), - (0x2F85, 'M', '臼'), - (0x2F86, 'M', '舌'), - (0x2F87, 'M', '舛'), - (0x2F88, 'M', '舟'), - (0x2F89, 'M', '艮'), - (0x2F8A, 'M', '色'), - (0x2F8B, 'M', '艸'), - (0x2F8C, 'M', '虍'), - (0x2F8D, 'M', '虫'), - (0x2F8E, 'M', '血'), - (0x2F8F, 'M', '行'), - (0x2F90, 'M', '衣'), - (0x2F91, 'M', '襾'), - (0x2F92, 'M', '見'), - (0x2F93, 'M', '角'), - (0x2F94, 'M', '言'), - (0x2F95, 'M', '谷'), - (0x2F96, 'M', '豆'), - (0x2F97, 'M', '豕'), - (0x2F98, 'M', '豸'), - (0x2F99, 'M', '貝'), - (0x2F9A, 'M', '赤'), - (0x2F9B, 'M', '走'), - (0x2F9C, 'M', '足'), - (0x2F9D, 'M', '身'), - (0x2F9E, 'M', '車'), - (0x2F9F, 'M', '辛'), - (0x2FA0, 'M', '辰'), - (0x2FA1, 'M', '辵'), - (0x2FA2, 'M', '邑'), - (0x2FA3, 'M', '酉'), - (0x2FA4, 'M', '釆'), - (0x2FA5, 'M', '里'), - (0x2FA6, 'M', '金'), - (0x2FA7, 'M', '長'), - (0x2FA8, 'M', '門'), - (0x2FA9, 'M', '阜'), - (0x2FAA, 'M', '隶'), - (0x2FAB, 'M', '隹'), - (0x2FAC, 'M', '雨'), - (0x2FAD, 'M', '靑'), - (0x2FAE, 'M', '非'), - (0x2FAF, 'M', '面'), - (0x2FB0, 'M', '革'), - (0x2FB1, 'M', '韋'), - (0x2FB2, 'M', '韭'), - (0x2FB3, 'M', '音'), - (0x2FB4, 'M', '頁'), - (0x2FB5, 'M', '風'), - (0x2FB6, 'M', '飛'), - (0x2FB7, 'M', '食'), - (0x2FB8, 'M', '首'), - (0x2FB9, 'M', '香'), - (0x2FBA, 'M', '馬'), - (0x2FBB, 'M', '骨'), - (0x2FBC, 'M', '高'), - (0x2FBD, 'M', '髟'), - (0x2FBE, 'M', '鬥'), - (0x2FBF, 'M', '鬯'), - (0x2FC0, 'M', '鬲'), - (0x2FC1, 'M', '鬼'), - (0x2FC2, 'M', '魚'), - (0x2FC3, 'M', '鳥'), - (0x2FC4, 'M', '鹵'), - (0x2FC5, 'M', '鹿'), - (0x2FC6, 'M', '麥'), - (0x2FC7, 'M', '麻'), - (0x2FC8, 'M', '黃'), - (0x2FC9, 'M', '黍'), - (0x2FCA, 'M', '黑'), - (0x2FCB, 'M', '黹'), - (0x2FCC, 'M', '黽'), - (0x2FCD, 'M', '鼎'), - (0x2FCE, 'M', '鼓'), - (0x2FCF, 'M', '鼠'), - (0x2FD0, 'M', '鼻'), - (0x2FD1, 'M', '齊'), - (0x2FD2, 'M', '齒'), - (0x2FD3, 'M', '龍'), - (0x2FD4, 'M', '龜'), - (0x2FD5, 'M', '龠'), - (0x2FD6, 'X'), - (0x3000, '3', ' '), - (0x3001, 'V'), - (0x3002, 'M', '.'), - (0x3003, 'V'), - (0x3036, 'M', '〒'), - (0x3037, 'V'), - (0x3038, 'M', '十'), - ] - -def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3039, 'M', '卄'), - (0x303A, 'M', '卅'), - (0x303B, 'V'), - (0x3040, 'X'), - (0x3041, 'V'), - (0x3097, 'X'), - (0x3099, 'V'), - (0x309B, '3', ' ゙'), - (0x309C, '3', ' ゚'), - (0x309D, 'V'), - (0x309F, 'M', 'より'), - (0x30A0, 'V'), - (0x30FF, 'M', 'コト'), - (0x3100, 'X'), - (0x3105, 'V'), - (0x3130, 'X'), - (0x3131, 'M', 'ᄀ'), - (0x3132, 'M', 'ᄁ'), - (0x3133, 'M', 'ᆪ'), - (0x3134, 'M', 'ᄂ'), - (0x3135, 'M', 'ᆬ'), - (0x3136, 'M', 'ᆭ'), - (0x3137, 'M', 'ᄃ'), - (0x3138, 'M', 'ᄄ'), - (0x3139, 'M', 'ᄅ'), - (0x313A, 'M', 'ᆰ'), - (0x313B, 'M', 'ᆱ'), - (0x313C, 'M', 'ᆲ'), - (0x313D, 'M', 'ᆳ'), - (0x313E, 'M', 'ᆴ'), - (0x313F, 'M', 'ᆵ'), - (0x3140, 'M', 'ᄚ'), - (0x3141, 'M', 'ᄆ'), - (0x3142, 'M', 'ᄇ'), - (0x3143, 'M', 'ᄈ'), - (0x3144, 'M', 'ᄡ'), - (0x3145, 'M', 'ᄉ'), - (0x3146, 'M', 'ᄊ'), - (0x3147, 'M', 'ᄋ'), - (0x3148, 'M', 'ᄌ'), - (0x3149, 'M', 'ᄍ'), - (0x314A, 'M', 'ᄎ'), - (0x314B, 'M', 'ᄏ'), - (0x314C, 'M', 'ᄐ'), - (0x314D, 'M', 'ᄑ'), - (0x314E, 'M', 'ᄒ'), - (0x314F, 'M', 'ᅡ'), - (0x3150, 'M', 'ᅢ'), - (0x3151, 'M', 'ᅣ'), - (0x3152, 'M', 'ᅤ'), - (0x3153, 'M', 'ᅥ'), - (0x3154, 'M', 'ᅦ'), - (0x3155, 'M', 'ᅧ'), - (0x3156, 'M', 'ᅨ'), - (0x3157, 'M', 'ᅩ'), - (0x3158, 'M', 'ᅪ'), - (0x3159, 'M', 'ᅫ'), - 
(0x315A, 'M', 'ᅬ'), - (0x315B, 'M', 'ᅭ'), - (0x315C, 'M', 'ᅮ'), - (0x315D, 'M', 'ᅯ'), - (0x315E, 'M', 'ᅰ'), - (0x315F, 'M', 'ᅱ'), - (0x3160, 'M', 'ᅲ'), - (0x3161, 'M', 'ᅳ'), - (0x3162, 'M', 'ᅴ'), - (0x3163, 'M', 'ᅵ'), - (0x3164, 'X'), - (0x3165, 'M', 'ᄔ'), - (0x3166, 'M', 'ᄕ'), - (0x3167, 'M', 'ᇇ'), - (0x3168, 'M', 'ᇈ'), - (0x3169, 'M', 'ᇌ'), - (0x316A, 'M', 'ᇎ'), - (0x316B, 'M', 'ᇓ'), - (0x316C, 'M', 'ᇗ'), - (0x316D, 'M', 'ᇙ'), - (0x316E, 'M', 'ᄜ'), - (0x316F, 'M', 'ᇝ'), - (0x3170, 'M', 'ᇟ'), - (0x3171, 'M', 'ᄝ'), - (0x3172, 'M', 'ᄞ'), - (0x3173, 'M', 'ᄠ'), - (0x3174, 'M', 'ᄢ'), - (0x3175, 'M', 'ᄣ'), - (0x3176, 'M', 'ᄧ'), - (0x3177, 'M', 'ᄩ'), - (0x3178, 'M', 'ᄫ'), - (0x3179, 'M', 'ᄬ'), - (0x317A, 'M', 'ᄭ'), - (0x317B, 'M', 'ᄮ'), - (0x317C, 'M', 'ᄯ'), - (0x317D, 'M', 'ᄲ'), - (0x317E, 'M', 'ᄶ'), - (0x317F, 'M', 'ᅀ'), - (0x3180, 'M', 'ᅇ'), - (0x3181, 'M', 'ᅌ'), - (0x3182, 'M', 'ᇱ'), - (0x3183, 'M', 'ᇲ'), - (0x3184, 'M', 'ᅗ'), - ] - -def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3185, 'M', 'ᅘ'), - (0x3186, 'M', 'ᅙ'), - (0x3187, 'M', 'ᆄ'), - (0x3188, 'M', 'ᆅ'), - (0x3189, 'M', 'ᆈ'), - (0x318A, 'M', 'ᆑ'), - (0x318B, 'M', 'ᆒ'), - (0x318C, 'M', 'ᆔ'), - (0x318D, 'M', 'ᆞ'), - (0x318E, 'M', 'ᆡ'), - (0x318F, 'X'), - (0x3190, 'V'), - (0x3192, 'M', '一'), - (0x3193, 'M', '二'), - (0x3194, 'M', '三'), - (0x3195, 'M', '四'), - (0x3196, 'M', '上'), - (0x3197, 'M', '中'), - (0x3198, 'M', '下'), - (0x3199, 'M', '甲'), - (0x319A, 'M', '乙'), - (0x319B, 'M', '丙'), - (0x319C, 'M', '丁'), - (0x319D, 'M', '天'), - (0x319E, 'M', '地'), - (0x319F, 'M', '人'), - (0x31A0, 'V'), - (0x31E4, 'X'), - (0x31F0, 'V'), - (0x3200, '3', '(ᄀ)'), - (0x3201, '3', '(ᄂ)'), - (0x3202, '3', '(ᄃ)'), - (0x3203, '3', '(ᄅ)'), - (0x3204, '3', '(ᄆ)'), - (0x3205, '3', '(ᄇ)'), - (0x3206, '3', '(ᄉ)'), - (0x3207, '3', '(ᄋ)'), - (0x3208, '3', '(ᄌ)'), - (0x3209, '3', '(ᄎ)'), - (0x320A, '3', '(ᄏ)'), - (0x320B, '3', '(ᄐ)'), - (0x320C, '3', '(ᄑ)'), - (0x320D, '3', '(ᄒ)'), - (0x320E, '3', '(가)'), - (0x320F, '3', '(나)'), - (0x3210, '3', '(다)'), - (0x3211, '3', '(라)'), - (0x3212, '3', '(마)'), - (0x3213, '3', '(바)'), - (0x3214, '3', '(사)'), - (0x3215, '3', '(아)'), - (0x3216, '3', '(자)'), - (0x3217, '3', '(차)'), - (0x3218, '3', '(카)'), - (0x3219, '3', '(타)'), - (0x321A, '3', '(파)'), - (0x321B, '3', '(하)'), - (0x321C, '3', '(주)'), - (0x321D, '3', '(오전)'), - (0x321E, '3', '(오후)'), - (0x321F, 'X'), - (0x3220, '3', '(一)'), - (0x3221, '3', '(二)'), - (0x3222, '3', '(三)'), - (0x3223, '3', '(四)'), - (0x3224, '3', '(五)'), - (0x3225, '3', '(六)'), - (0x3226, '3', '(七)'), - (0x3227, '3', '(八)'), - (0x3228, '3', '(九)'), - (0x3229, '3', '(十)'), - (0x322A, '3', '(月)'), - (0x322B, '3', '(火)'), - (0x322C, '3', '(水)'), - (0x322D, '3', '(木)'), - (0x322E, '3', '(金)'), - (0x322F, '3', '(土)'), - (0x3230, '3', '(日)'), - (0x3231, '3', '(株)'), - (0x3232, '3', '(有)'), - (0x3233, '3', '(社)'), - (0x3234, '3', '(名)'), - (0x3235, '3', '(特)'), - (0x3236, '3', '(財)'), - (0x3237, '3', '(祝)'), - (0x3238, '3', '(労)'), - (0x3239, '3', '(代)'), - (0x323A, '3', '(呼)'), - (0x323B, '3', '(学)'), - (0x323C, '3', '(監)'), - (0x323D, '3', '(企)'), - (0x323E, '3', '(資)'), - (0x323F, '3', '(協)'), - (0x3240, '3', '(祭)'), - (0x3241, '3', '(休)'), - (0x3242, '3', '(自)'), - (0x3243, '3', '(至)'), - (0x3244, 'M', '問'), - (0x3245, 'M', '幼'), - (0x3246, 'M', '文'), - ] - -def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3247, 'M', '箏'), - (0x3248, 'V'), - (0x3250, 'M', 'pte'), - (0x3251, 'M', '21'), - (0x3252, 'M', '22'), - (0x3253, 'M', '23'), - 
(0x3254, 'M', '24'), - (0x3255, 'M', '25'), - (0x3256, 'M', '26'), - (0x3257, 'M', '27'), - (0x3258, 'M', '28'), - (0x3259, 'M', '29'), - (0x325A, 'M', '30'), - (0x325B, 'M', '31'), - (0x325C, 'M', '32'), - (0x325D, 'M', '33'), - (0x325E, 'M', '34'), - (0x325F, 'M', '35'), - (0x3260, 'M', 'ᄀ'), - (0x3261, 'M', 'ᄂ'), - (0x3262, 'M', 'ᄃ'), - (0x3263, 'M', 'ᄅ'), - (0x3264, 'M', 'ᄆ'), - (0x3265, 'M', 'ᄇ'), - (0x3266, 'M', 'ᄉ'), - (0x3267, 'M', 'ᄋ'), - (0x3268, 'M', 'ᄌ'), - (0x3269, 'M', 'ᄎ'), - (0x326A, 'M', 'ᄏ'), - (0x326B, 'M', 'ᄐ'), - (0x326C, 'M', 'ᄑ'), - (0x326D, 'M', 'ᄒ'), - (0x326E, 'M', '가'), - (0x326F, 'M', '나'), - (0x3270, 'M', '다'), - (0x3271, 'M', '라'), - (0x3272, 'M', '마'), - (0x3273, 'M', '바'), - (0x3274, 'M', '사'), - (0x3275, 'M', '아'), - (0x3276, 'M', '자'), - (0x3277, 'M', '차'), - (0x3278, 'M', '카'), - (0x3279, 'M', '타'), - (0x327A, 'M', '파'), - (0x327B, 'M', '하'), - (0x327C, 'M', '참고'), - (0x327D, 'M', '주의'), - (0x327E, 'M', '우'), - (0x327F, 'V'), - (0x3280, 'M', '一'), - (0x3281, 'M', '二'), - (0x3282, 'M', '三'), - (0x3283, 'M', '四'), - (0x3284, 'M', '五'), - (0x3285, 'M', '六'), - (0x3286, 'M', '七'), - (0x3287, 'M', '八'), - (0x3288, 'M', '九'), - (0x3289, 'M', '十'), - (0x328A, 'M', '月'), - (0x328B, 'M', '火'), - (0x328C, 'M', '水'), - (0x328D, 'M', '木'), - (0x328E, 'M', '金'), - (0x328F, 'M', '土'), - (0x3290, 'M', '日'), - (0x3291, 'M', '株'), - (0x3292, 'M', '有'), - (0x3293, 'M', '社'), - (0x3294, 'M', '名'), - (0x3295, 'M', '特'), - (0x3296, 'M', '財'), - (0x3297, 'M', '祝'), - (0x3298, 'M', '労'), - (0x3299, 'M', '秘'), - (0x329A, 'M', '男'), - (0x329B, 'M', '女'), - (0x329C, 'M', '適'), - (0x329D, 'M', '優'), - (0x329E, 'M', '印'), - (0x329F, 'M', '注'), - (0x32A0, 'M', '項'), - (0x32A1, 'M', '休'), - (0x32A2, 'M', '写'), - (0x32A3, 'M', '正'), - (0x32A4, 'M', '上'), - (0x32A5, 'M', '中'), - (0x32A6, 'M', '下'), - (0x32A7, 'M', '左'), - (0x32A8, 'M', '右'), - (0x32A9, 'M', '医'), - (0x32AA, 'M', '宗'), - (0x32AB, 'M', '学'), - (0x32AC, 'M', '監'), - (0x32AD, 'M', '企'), - (0x32AE, 'M', '資'), - (0x32AF, 'M', '協'), - (0x32B0, 'M', '夜'), - (0x32B1, 'M', '36'), - ] - -def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x32B2, 'M', '37'), - (0x32B3, 'M', '38'), - (0x32B4, 'M', '39'), - (0x32B5, 'M', '40'), - (0x32B6, 'M', '41'), - (0x32B7, 'M', '42'), - (0x32B8, 'M', '43'), - (0x32B9, 'M', '44'), - (0x32BA, 'M', '45'), - (0x32BB, 'M', '46'), - (0x32BC, 'M', '47'), - (0x32BD, 'M', '48'), - (0x32BE, 'M', '49'), - (0x32BF, 'M', '50'), - (0x32C0, 'M', '1月'), - (0x32C1, 'M', '2月'), - (0x32C2, 'M', '3月'), - (0x32C3, 'M', '4月'), - (0x32C4, 'M', '5月'), - (0x32C5, 'M', '6月'), - (0x32C6, 'M', '7月'), - (0x32C7, 'M', '8月'), - (0x32C8, 'M', '9月'), - (0x32C9, 'M', '10月'), - (0x32CA, 'M', '11月'), - (0x32CB, 'M', '12月'), - (0x32CC, 'M', 'hg'), - (0x32CD, 'M', 'erg'), - (0x32CE, 'M', 'ev'), - (0x32CF, 'M', 'ltd'), - (0x32D0, 'M', 'ア'), - (0x32D1, 'M', 'イ'), - (0x32D2, 'M', 'ウ'), - (0x32D3, 'M', 'エ'), - (0x32D4, 'M', 'オ'), - (0x32D5, 'M', 'カ'), - (0x32D6, 'M', 'キ'), - (0x32D7, 'M', 'ク'), - (0x32D8, 'M', 'ケ'), - (0x32D9, 'M', 'コ'), - (0x32DA, 'M', 'サ'), - (0x32DB, 'M', 'シ'), - (0x32DC, 'M', 'ス'), - (0x32DD, 'M', 'セ'), - (0x32DE, 'M', 'ソ'), - (0x32DF, 'M', 'タ'), - (0x32E0, 'M', 'チ'), - (0x32E1, 'M', 'ツ'), - (0x32E2, 'M', 'テ'), - (0x32E3, 'M', 'ト'), - (0x32E4, 'M', 'ナ'), - (0x32E5, 'M', 'ニ'), - (0x32E6, 'M', 'ヌ'), - (0x32E7, 'M', 'ネ'), - (0x32E8, 'M', 'ノ'), - (0x32E9, 'M', 'ハ'), - (0x32EA, 'M', 'ヒ'), - (0x32EB, 'M', 'フ'), - (0x32EC, 'M', 'ヘ'), - (0x32ED, 'M', 'ホ'), - (0x32EE, 'M', 'マ'), - (0x32EF, 
'M', 'ミ'), - (0x32F0, 'M', 'ム'), - (0x32F1, 'M', 'メ'), - (0x32F2, 'M', 'モ'), - (0x32F3, 'M', 'ヤ'), - (0x32F4, 'M', 'ユ'), - (0x32F5, 'M', 'ヨ'), - (0x32F6, 'M', 'ラ'), - (0x32F7, 'M', 'リ'), - (0x32F8, 'M', 'ル'), - (0x32F9, 'M', 'レ'), - (0x32FA, 'M', 'ロ'), - (0x32FB, 'M', 'ワ'), - (0x32FC, 'M', 'ヰ'), - (0x32FD, 'M', 'ヱ'), - (0x32FE, 'M', 'ヲ'), - (0x32FF, 'M', '令和'), - (0x3300, 'M', 'アパート'), - (0x3301, 'M', 'アルファ'), - (0x3302, 'M', 'アンペア'), - (0x3303, 'M', 'アール'), - (0x3304, 'M', 'イニング'), - (0x3305, 'M', 'インチ'), - (0x3306, 'M', 'ウォン'), - (0x3307, 'M', 'エスクード'), - (0x3308, 'M', 'エーカー'), - (0x3309, 'M', 'オンス'), - (0x330A, 'M', 'オーム'), - (0x330B, 'M', 'カイリ'), - (0x330C, 'M', 'カラット'), - (0x330D, 'M', 'カロリー'), - (0x330E, 'M', 'ガロン'), - (0x330F, 'M', 'ガンマ'), - (0x3310, 'M', 'ギガ'), - (0x3311, 'M', 'ギニー'), - (0x3312, 'M', 'キュリー'), - (0x3313, 'M', 'ギルダー'), - (0x3314, 'M', 'キロ'), - (0x3315, 'M', 'キログラム'), - ] - -def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x3316, 'M', 'キロメートル'), - (0x3317, 'M', 'キロワット'), - (0x3318, 'M', 'グラム'), - (0x3319, 'M', 'グラムトン'), - (0x331A, 'M', 'クルゼイロ'), - (0x331B, 'M', 'クローネ'), - (0x331C, 'M', 'ケース'), - (0x331D, 'M', 'コルナ'), - (0x331E, 'M', 'コーポ'), - (0x331F, 'M', 'サイクル'), - (0x3320, 'M', 'サンチーム'), - (0x3321, 'M', 'シリング'), - (0x3322, 'M', 'センチ'), - (0x3323, 'M', 'セント'), - (0x3324, 'M', 'ダース'), - (0x3325, 'M', 'デシ'), - (0x3326, 'M', 'ドル'), - (0x3327, 'M', 'トン'), - (0x3328, 'M', 'ナノ'), - (0x3329, 'M', 'ノット'), - (0x332A, 'M', 'ハイツ'), - (0x332B, 'M', 'パーセント'), - (0x332C, 'M', 'パーツ'), - (0x332D, 'M', 'バーレル'), - (0x332E, 'M', 'ピアストル'), - (0x332F, 'M', 'ピクル'), - (0x3330, 'M', 'ピコ'), - (0x3331, 'M', 'ビル'), - (0x3332, 'M', 'ファラッド'), - (0x3333, 'M', 'フィート'), - (0x3334, 'M', 'ブッシェル'), - (0x3335, 'M', 'フラン'), - (0x3336, 'M', 'ヘクタール'), - (0x3337, 'M', 'ペソ'), - (0x3338, 'M', 'ペニヒ'), - (0x3339, 'M', 'ヘルツ'), - (0x333A, 'M', 'ペンス'), - (0x333B, 'M', 'ページ'), - (0x333C, 'M', 'ベータ'), - (0x333D, 'M', 'ポイント'), - (0x333E, 'M', 'ボルト'), - (0x333F, 'M', 'ホン'), - (0x3340, 'M', 'ポンド'), - (0x3341, 'M', 'ホール'), - (0x3342, 'M', 'ホーン'), - (0x3343, 'M', 'マイクロ'), - (0x3344, 'M', 'マイル'), - (0x3345, 'M', 'マッハ'), - (0x3346, 'M', 'マルク'), - (0x3347, 'M', 'マンション'), - (0x3348, 'M', 'ミクロン'), - (0x3349, 'M', 'ミリ'), - (0x334A, 'M', 'ミリバール'), - (0x334B, 'M', 'メガ'), - (0x334C, 'M', 'メガトン'), - (0x334D, 'M', 'メートル'), - (0x334E, 'M', 'ヤード'), - (0x334F, 'M', 'ヤール'), - (0x3350, 'M', 'ユアン'), - (0x3351, 'M', 'リットル'), - (0x3352, 'M', 'リラ'), - (0x3353, 'M', 'ルピー'), - (0x3354, 'M', 'ルーブル'), - (0x3355, 'M', 'レム'), - (0x3356, 'M', 'レントゲン'), - (0x3357, 'M', 'ワット'), - (0x3358, 'M', '0点'), - (0x3359, 'M', '1点'), - (0x335A, 'M', '2点'), - (0x335B, 'M', '3点'), - (0x335C, 'M', '4点'), - (0x335D, 'M', '5点'), - (0x335E, 'M', '6点'), - (0x335F, 'M', '7点'), - (0x3360, 'M', '8点'), - (0x3361, 'M', '9点'), - (0x3362, 'M', '10点'), - (0x3363, 'M', '11点'), - (0x3364, 'M', '12点'), - (0x3365, 'M', '13点'), - (0x3366, 'M', '14点'), - (0x3367, 'M', '15点'), - (0x3368, 'M', '16点'), - (0x3369, 'M', '17点'), - (0x336A, 'M', '18点'), - (0x336B, 'M', '19点'), - (0x336C, 'M', '20点'), - (0x336D, 'M', '21点'), - (0x336E, 'M', '22点'), - (0x336F, 'M', '23点'), - (0x3370, 'M', '24点'), - (0x3371, 'M', 'hpa'), - (0x3372, 'M', 'da'), - (0x3373, 'M', 'au'), - (0x3374, 'M', 'bar'), - (0x3375, 'M', 'ov'), - (0x3376, 'M', 'pc'), - (0x3377, 'M', 'dm'), - (0x3378, 'M', 'dm2'), - (0x3379, 'M', 'dm3'), - ] - -def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x337A, 'M', 'iu'), - (0x337B, 'M', '平成'), - (0x337C, 'M', 
'昭和'), - (0x337D, 'M', '大正'), - (0x337E, 'M', '明治'), - (0x337F, 'M', '株式会社'), - (0x3380, 'M', 'pa'), - (0x3381, 'M', 'na'), - (0x3382, 'M', 'μa'), - (0x3383, 'M', 'ma'), - (0x3384, 'M', 'ka'), - (0x3385, 'M', 'kb'), - (0x3386, 'M', 'mb'), - (0x3387, 'M', 'gb'), - (0x3388, 'M', 'cal'), - (0x3389, 'M', 'kcal'), - (0x338A, 'M', 'pf'), - (0x338B, 'M', 'nf'), - (0x338C, 'M', 'μf'), - (0x338D, 'M', 'μg'), - (0x338E, 'M', 'mg'), - (0x338F, 'M', 'kg'), - (0x3390, 'M', 'hz'), - (0x3391, 'M', 'khz'), - (0x3392, 'M', 'mhz'), - (0x3393, 'M', 'ghz'), - (0x3394, 'M', 'thz'), - (0x3395, 'M', 'μl'), - (0x3396, 'M', 'ml'), - (0x3397, 'M', 'dl'), - (0x3398, 'M', 'kl'), - (0x3399, 'M', 'fm'), - (0x339A, 'M', 'nm'), - (0x339B, 'M', 'μm'), - (0x339C, 'M', 'mm'), - (0x339D, 'M', 'cm'), - (0x339E, 'M', 'km'), - (0x339F, 'M', 'mm2'), - (0x33A0, 'M', 'cm2'), - (0x33A1, 'M', 'm2'), - (0x33A2, 'M', 'km2'), - (0x33A3, 'M', 'mm3'), - (0x33A4, 'M', 'cm3'), - (0x33A5, 'M', 'm3'), - (0x33A6, 'M', 'km3'), - (0x33A7, 'M', 'm∕s'), - (0x33A8, 'M', 'm∕s2'), - (0x33A9, 'M', 'pa'), - (0x33AA, 'M', 'kpa'), - (0x33AB, 'M', 'mpa'), - (0x33AC, 'M', 'gpa'), - (0x33AD, 'M', 'rad'), - (0x33AE, 'M', 'rad∕s'), - (0x33AF, 'M', 'rad∕s2'), - (0x33B0, 'M', 'ps'), - (0x33B1, 'M', 'ns'), - (0x33B2, 'M', 'μs'), - (0x33B3, 'M', 'ms'), - (0x33B4, 'M', 'pv'), - (0x33B5, 'M', 'nv'), - (0x33B6, 'M', 'μv'), - (0x33B7, 'M', 'mv'), - (0x33B8, 'M', 'kv'), - (0x33B9, 'M', 'mv'), - (0x33BA, 'M', 'pw'), - (0x33BB, 'M', 'nw'), - (0x33BC, 'M', 'μw'), - (0x33BD, 'M', 'mw'), - (0x33BE, 'M', 'kw'), - (0x33BF, 'M', 'mw'), - (0x33C0, 'M', 'kω'), - (0x33C1, 'M', 'mω'), - (0x33C2, 'X'), - (0x33C3, 'M', 'bq'), - (0x33C4, 'M', 'cc'), - (0x33C5, 'M', 'cd'), - (0x33C6, 'M', 'c∕kg'), - (0x33C7, 'X'), - (0x33C8, 'M', 'db'), - (0x33C9, 'M', 'gy'), - (0x33CA, 'M', 'ha'), - (0x33CB, 'M', 'hp'), - (0x33CC, 'M', 'in'), - (0x33CD, 'M', 'kk'), - (0x33CE, 'M', 'km'), - (0x33CF, 'M', 'kt'), - (0x33D0, 'M', 'lm'), - (0x33D1, 'M', 'ln'), - (0x33D2, 'M', 'log'), - (0x33D3, 'M', 'lx'), - (0x33D4, 'M', 'mb'), - (0x33D5, 'M', 'mil'), - (0x33D6, 'M', 'mol'), - (0x33D7, 'M', 'ph'), - (0x33D8, 'X'), - (0x33D9, 'M', 'ppm'), - (0x33DA, 'M', 'pr'), - (0x33DB, 'M', 'sr'), - (0x33DC, 'M', 'sv'), - (0x33DD, 'M', 'wb'), - ] - -def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x33DE, 'M', 'v∕m'), - (0x33DF, 'M', 'a∕m'), - (0x33E0, 'M', '1日'), - (0x33E1, 'M', '2日'), - (0x33E2, 'M', '3日'), - (0x33E3, 'M', '4日'), - (0x33E4, 'M', '5日'), - (0x33E5, 'M', '6日'), - (0x33E6, 'M', '7日'), - (0x33E7, 'M', '8日'), - (0x33E8, 'M', '9日'), - (0x33E9, 'M', '10日'), - (0x33EA, 'M', '11日'), - (0x33EB, 'M', '12日'), - (0x33EC, 'M', '13日'), - (0x33ED, 'M', '14日'), - (0x33EE, 'M', '15日'), - (0x33EF, 'M', '16日'), - (0x33F0, 'M', '17日'), - (0x33F1, 'M', '18日'), - (0x33F2, 'M', '19日'), - (0x33F3, 'M', '20日'), - (0x33F4, 'M', '21日'), - (0x33F5, 'M', '22日'), - (0x33F6, 'M', '23日'), - (0x33F7, 'M', '24日'), - (0x33F8, 'M', '25日'), - (0x33F9, 'M', '26日'), - (0x33FA, 'M', '27日'), - (0x33FB, 'M', '28日'), - (0x33FC, 'M', '29日'), - (0x33FD, 'M', '30日'), - (0x33FE, 'M', '31日'), - (0x33FF, 'M', 'gal'), - (0x3400, 'V'), - (0xA48D, 'X'), - (0xA490, 'V'), - (0xA4C7, 'X'), - (0xA4D0, 'V'), - (0xA62C, 'X'), - (0xA640, 'M', 'ꙁ'), - (0xA641, 'V'), - (0xA642, 'M', 'ꙃ'), - (0xA643, 'V'), - (0xA644, 'M', 'ꙅ'), - (0xA645, 'V'), - (0xA646, 'M', 'ꙇ'), - (0xA647, 'V'), - (0xA648, 'M', 'ꙉ'), - (0xA649, 'V'), - (0xA64A, 'M', 'ꙋ'), - (0xA64B, 'V'), - (0xA64C, 'M', 'ꙍ'), - (0xA64D, 'V'), - (0xA64E, 'M', 'ꙏ'), - 
(0xA64F, 'V'), - (0xA650, 'M', 'ꙑ'), - (0xA651, 'V'), - (0xA652, 'M', 'ꙓ'), - (0xA653, 'V'), - (0xA654, 'M', 'ꙕ'), - (0xA655, 'V'), - (0xA656, 'M', 'ꙗ'), - (0xA657, 'V'), - (0xA658, 'M', 'ꙙ'), - (0xA659, 'V'), - (0xA65A, 'M', 'ꙛ'), - (0xA65B, 'V'), - (0xA65C, 'M', 'ꙝ'), - (0xA65D, 'V'), - (0xA65E, 'M', 'ꙟ'), - (0xA65F, 'V'), - (0xA660, 'M', 'ꙡ'), - (0xA661, 'V'), - (0xA662, 'M', 'ꙣ'), - (0xA663, 'V'), - (0xA664, 'M', 'ꙥ'), - (0xA665, 'V'), - (0xA666, 'M', 'ꙧ'), - (0xA667, 'V'), - (0xA668, 'M', 'ꙩ'), - (0xA669, 'V'), - (0xA66A, 'M', 'ꙫ'), - (0xA66B, 'V'), - (0xA66C, 'M', 'ꙭ'), - (0xA66D, 'V'), - (0xA680, 'M', 'ꚁ'), - (0xA681, 'V'), - (0xA682, 'M', 'ꚃ'), - (0xA683, 'V'), - (0xA684, 'M', 'ꚅ'), - (0xA685, 'V'), - (0xA686, 'M', 'ꚇ'), - (0xA687, 'V'), - (0xA688, 'M', 'ꚉ'), - (0xA689, 'V'), - (0xA68A, 'M', 'ꚋ'), - (0xA68B, 'V'), - (0xA68C, 'M', 'ꚍ'), - (0xA68D, 'V'), - ] - -def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA68E, 'M', 'ꚏ'), - (0xA68F, 'V'), - (0xA690, 'M', 'ꚑ'), - (0xA691, 'V'), - (0xA692, 'M', 'ꚓ'), - (0xA693, 'V'), - (0xA694, 'M', 'ꚕ'), - (0xA695, 'V'), - (0xA696, 'M', 'ꚗ'), - (0xA697, 'V'), - (0xA698, 'M', 'ꚙ'), - (0xA699, 'V'), - (0xA69A, 'M', 'ꚛ'), - (0xA69B, 'V'), - (0xA69C, 'M', 'ъ'), - (0xA69D, 'M', 'ь'), - (0xA69E, 'V'), - (0xA6F8, 'X'), - (0xA700, 'V'), - (0xA722, 'M', 'ꜣ'), - (0xA723, 'V'), - (0xA724, 'M', 'ꜥ'), - (0xA725, 'V'), - (0xA726, 'M', 'ꜧ'), - (0xA727, 'V'), - (0xA728, 'M', 'ꜩ'), - (0xA729, 'V'), - (0xA72A, 'M', 'ꜫ'), - (0xA72B, 'V'), - (0xA72C, 'M', 'ꜭ'), - (0xA72D, 'V'), - (0xA72E, 'M', 'ꜯ'), - (0xA72F, 'V'), - (0xA732, 'M', 'ꜳ'), - (0xA733, 'V'), - (0xA734, 'M', 'ꜵ'), - (0xA735, 'V'), - (0xA736, 'M', 'ꜷ'), - (0xA737, 'V'), - (0xA738, 'M', 'ꜹ'), - (0xA739, 'V'), - (0xA73A, 'M', 'ꜻ'), - (0xA73B, 'V'), - (0xA73C, 'M', 'ꜽ'), - (0xA73D, 'V'), - (0xA73E, 'M', 'ꜿ'), - (0xA73F, 'V'), - (0xA740, 'M', 'ꝁ'), - (0xA741, 'V'), - (0xA742, 'M', 'ꝃ'), - (0xA743, 'V'), - (0xA744, 'M', 'ꝅ'), - (0xA745, 'V'), - (0xA746, 'M', 'ꝇ'), - (0xA747, 'V'), - (0xA748, 'M', 'ꝉ'), - (0xA749, 'V'), - (0xA74A, 'M', 'ꝋ'), - (0xA74B, 'V'), - (0xA74C, 'M', 'ꝍ'), - (0xA74D, 'V'), - (0xA74E, 'M', 'ꝏ'), - (0xA74F, 'V'), - (0xA750, 'M', 'ꝑ'), - (0xA751, 'V'), - (0xA752, 'M', 'ꝓ'), - (0xA753, 'V'), - (0xA754, 'M', 'ꝕ'), - (0xA755, 'V'), - (0xA756, 'M', 'ꝗ'), - (0xA757, 'V'), - (0xA758, 'M', 'ꝙ'), - (0xA759, 'V'), - (0xA75A, 'M', 'ꝛ'), - (0xA75B, 'V'), - (0xA75C, 'M', 'ꝝ'), - (0xA75D, 'V'), - (0xA75E, 'M', 'ꝟ'), - (0xA75F, 'V'), - (0xA760, 'M', 'ꝡ'), - (0xA761, 'V'), - (0xA762, 'M', 'ꝣ'), - (0xA763, 'V'), - (0xA764, 'M', 'ꝥ'), - (0xA765, 'V'), - (0xA766, 'M', 'ꝧ'), - (0xA767, 'V'), - (0xA768, 'M', 'ꝩ'), - (0xA769, 'V'), - (0xA76A, 'M', 'ꝫ'), - (0xA76B, 'V'), - (0xA76C, 'M', 'ꝭ'), - (0xA76D, 'V'), - (0xA76E, 'M', 'ꝯ'), - (0xA76F, 'V'), - (0xA770, 'M', 'ꝯ'), - (0xA771, 'V'), - (0xA779, 'M', 'ꝺ'), - (0xA77A, 'V'), - (0xA77B, 'M', 'ꝼ'), - ] - -def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA77C, 'V'), - (0xA77D, 'M', 'ᵹ'), - (0xA77E, 'M', 'ꝿ'), - (0xA77F, 'V'), - (0xA780, 'M', 'ꞁ'), - (0xA781, 'V'), - (0xA782, 'M', 'ꞃ'), - (0xA783, 'V'), - (0xA784, 'M', 'ꞅ'), - (0xA785, 'V'), - (0xA786, 'M', 'ꞇ'), - (0xA787, 'V'), - (0xA78B, 'M', 'ꞌ'), - (0xA78C, 'V'), - (0xA78D, 'M', 'ɥ'), - (0xA78E, 'V'), - (0xA790, 'M', 'ꞑ'), - (0xA791, 'V'), - (0xA792, 'M', 'ꞓ'), - (0xA793, 'V'), - (0xA796, 'M', 'ꞗ'), - (0xA797, 'V'), - (0xA798, 'M', 'ꞙ'), - (0xA799, 'V'), - (0xA79A, 'M', 'ꞛ'), - (0xA79B, 'V'), - (0xA79C, 'M', 'ꞝ'), - (0xA79D, 'V'), - (0xA79E, 
'M', 'ꞟ'), - (0xA79F, 'V'), - (0xA7A0, 'M', 'ꞡ'), - (0xA7A1, 'V'), - (0xA7A2, 'M', 'ꞣ'), - (0xA7A3, 'V'), - (0xA7A4, 'M', 'ꞥ'), - (0xA7A5, 'V'), - (0xA7A6, 'M', 'ꞧ'), - (0xA7A7, 'V'), - (0xA7A8, 'M', 'ꞩ'), - (0xA7A9, 'V'), - (0xA7AA, 'M', 'ɦ'), - (0xA7AB, 'M', 'ɜ'), - (0xA7AC, 'M', 'ɡ'), - (0xA7AD, 'M', 'ɬ'), - (0xA7AE, 'M', 'ɪ'), - (0xA7AF, 'V'), - (0xA7B0, 'M', 'ʞ'), - (0xA7B1, 'M', 'ʇ'), - (0xA7B2, 'M', 'ʝ'), - (0xA7B3, 'M', 'ꭓ'), - (0xA7B4, 'M', 'ꞵ'), - (0xA7B5, 'V'), - (0xA7B6, 'M', 'ꞷ'), - (0xA7B7, 'V'), - (0xA7B8, 'M', 'ꞹ'), - (0xA7B9, 'V'), - (0xA7BA, 'M', 'ꞻ'), - (0xA7BB, 'V'), - (0xA7BC, 'M', 'ꞽ'), - (0xA7BD, 'V'), - (0xA7BE, 'M', 'ꞿ'), - (0xA7BF, 'V'), - (0xA7C0, 'M', 'ꟁ'), - (0xA7C1, 'V'), - (0xA7C2, 'M', 'ꟃ'), - (0xA7C3, 'V'), - (0xA7C4, 'M', 'ꞔ'), - (0xA7C5, 'M', 'ʂ'), - (0xA7C6, 'M', 'ᶎ'), - (0xA7C7, 'M', 'ꟈ'), - (0xA7C8, 'V'), - (0xA7C9, 'M', 'ꟊ'), - (0xA7CA, 'V'), - (0xA7CB, 'X'), - (0xA7D0, 'M', 'ꟑ'), - (0xA7D1, 'V'), - (0xA7D2, 'X'), - (0xA7D3, 'V'), - (0xA7D4, 'X'), - (0xA7D5, 'V'), - (0xA7D6, 'M', 'ꟗ'), - (0xA7D7, 'V'), - (0xA7D8, 'M', 'ꟙ'), - (0xA7D9, 'V'), - (0xA7DA, 'X'), - (0xA7F2, 'M', 'c'), - (0xA7F3, 'M', 'f'), - (0xA7F4, 'M', 'q'), - (0xA7F5, 'M', 'ꟶ'), - (0xA7F6, 'V'), - (0xA7F8, 'M', 'ħ'), - (0xA7F9, 'M', 'œ'), - (0xA7FA, 'V'), - (0xA82D, 'X'), - (0xA830, 'V'), - (0xA83A, 'X'), - (0xA840, 'V'), - (0xA878, 'X'), - (0xA880, 'V'), - (0xA8C6, 'X'), - ] - -def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xA8CE, 'V'), - (0xA8DA, 'X'), - (0xA8E0, 'V'), - (0xA954, 'X'), - (0xA95F, 'V'), - (0xA97D, 'X'), - (0xA980, 'V'), - (0xA9CE, 'X'), - (0xA9CF, 'V'), - (0xA9DA, 'X'), - (0xA9DE, 'V'), - (0xA9FF, 'X'), - (0xAA00, 'V'), - (0xAA37, 'X'), - (0xAA40, 'V'), - (0xAA4E, 'X'), - (0xAA50, 'V'), - (0xAA5A, 'X'), - (0xAA5C, 'V'), - (0xAAC3, 'X'), - (0xAADB, 'V'), - (0xAAF7, 'X'), - (0xAB01, 'V'), - (0xAB07, 'X'), - (0xAB09, 'V'), - (0xAB0F, 'X'), - (0xAB11, 'V'), - (0xAB17, 'X'), - (0xAB20, 'V'), - (0xAB27, 'X'), - (0xAB28, 'V'), - (0xAB2F, 'X'), - (0xAB30, 'V'), - (0xAB5C, 'M', 'ꜧ'), - (0xAB5D, 'M', 'ꬷ'), - (0xAB5E, 'M', 'ɫ'), - (0xAB5F, 'M', 'ꭒ'), - (0xAB60, 'V'), - (0xAB69, 'M', 'ʍ'), - (0xAB6A, 'V'), - (0xAB6C, 'X'), - (0xAB70, 'M', 'Ꭰ'), - (0xAB71, 'M', 'Ꭱ'), - (0xAB72, 'M', 'Ꭲ'), - (0xAB73, 'M', 'Ꭳ'), - (0xAB74, 'M', 'Ꭴ'), - (0xAB75, 'M', 'Ꭵ'), - (0xAB76, 'M', 'Ꭶ'), - (0xAB77, 'M', 'Ꭷ'), - (0xAB78, 'M', 'Ꭸ'), - (0xAB79, 'M', 'Ꭹ'), - (0xAB7A, 'M', 'Ꭺ'), - (0xAB7B, 'M', 'Ꭻ'), - (0xAB7C, 'M', 'Ꭼ'), - (0xAB7D, 'M', 'Ꭽ'), - (0xAB7E, 'M', 'Ꭾ'), - (0xAB7F, 'M', 'Ꭿ'), - (0xAB80, 'M', 'Ꮀ'), - (0xAB81, 'M', 'Ꮁ'), - (0xAB82, 'M', 'Ꮂ'), - (0xAB83, 'M', 'Ꮃ'), - (0xAB84, 'M', 'Ꮄ'), - (0xAB85, 'M', 'Ꮅ'), - (0xAB86, 'M', 'Ꮆ'), - (0xAB87, 'M', 'Ꮇ'), - (0xAB88, 'M', 'Ꮈ'), - (0xAB89, 'M', 'Ꮉ'), - (0xAB8A, 'M', 'Ꮊ'), - (0xAB8B, 'M', 'Ꮋ'), - (0xAB8C, 'M', 'Ꮌ'), - (0xAB8D, 'M', 'Ꮍ'), - (0xAB8E, 'M', 'Ꮎ'), - (0xAB8F, 'M', 'Ꮏ'), - (0xAB90, 'M', 'Ꮐ'), - (0xAB91, 'M', 'Ꮑ'), - (0xAB92, 'M', 'Ꮒ'), - (0xAB93, 'M', 'Ꮓ'), - (0xAB94, 'M', 'Ꮔ'), - (0xAB95, 'M', 'Ꮕ'), - (0xAB96, 'M', 'Ꮖ'), - (0xAB97, 'M', 'Ꮗ'), - (0xAB98, 'M', 'Ꮘ'), - (0xAB99, 'M', 'Ꮙ'), - (0xAB9A, 'M', 'Ꮚ'), - (0xAB9B, 'M', 'Ꮛ'), - (0xAB9C, 'M', 'Ꮜ'), - (0xAB9D, 'M', 'Ꮝ'), - (0xAB9E, 'M', 'Ꮞ'), - (0xAB9F, 'M', 'Ꮟ'), - (0xABA0, 'M', 'Ꮠ'), - (0xABA1, 'M', 'Ꮡ'), - (0xABA2, 'M', 'Ꮢ'), - (0xABA3, 'M', 'Ꮣ'), - (0xABA4, 'M', 'Ꮤ'), - (0xABA5, 'M', 'Ꮥ'), - (0xABA6, 'M', 'Ꮦ'), - (0xABA7, 'M', 'Ꮧ'), - (0xABA8, 'M', 'Ꮨ'), - (0xABA9, 'M', 'Ꮩ'), - (0xABAA, 'M', 'Ꮪ'), - ] - -def _seg_39() -> List[Union[Tuple[int, 
str], Tuple[int, str, str]]]: - return [ - (0xABAB, 'M', 'Ꮫ'), - (0xABAC, 'M', 'Ꮬ'), - (0xABAD, 'M', 'Ꮭ'), - (0xABAE, 'M', 'Ꮮ'), - (0xABAF, 'M', 'Ꮯ'), - (0xABB0, 'M', 'Ꮰ'), - (0xABB1, 'M', 'Ꮱ'), - (0xABB2, 'M', 'Ꮲ'), - (0xABB3, 'M', 'Ꮳ'), - (0xABB4, 'M', 'Ꮴ'), - (0xABB5, 'M', 'Ꮵ'), - (0xABB6, 'M', 'Ꮶ'), - (0xABB7, 'M', 'Ꮷ'), - (0xABB8, 'M', 'Ꮸ'), - (0xABB9, 'M', 'Ꮹ'), - (0xABBA, 'M', 'Ꮺ'), - (0xABBB, 'M', 'Ꮻ'), - (0xABBC, 'M', 'Ꮼ'), - (0xABBD, 'M', 'Ꮽ'), - (0xABBE, 'M', 'Ꮾ'), - (0xABBF, 'M', 'Ꮿ'), - (0xABC0, 'V'), - (0xABEE, 'X'), - (0xABF0, 'V'), - (0xABFA, 'X'), - (0xAC00, 'V'), - (0xD7A4, 'X'), - (0xD7B0, 'V'), - (0xD7C7, 'X'), - (0xD7CB, 'V'), - (0xD7FC, 'X'), - (0xF900, 'M', '豈'), - (0xF901, 'M', '更'), - (0xF902, 'M', '車'), - (0xF903, 'M', '賈'), - (0xF904, 'M', '滑'), - (0xF905, 'M', '串'), - (0xF906, 'M', '句'), - (0xF907, 'M', '龜'), - (0xF909, 'M', '契'), - (0xF90A, 'M', '金'), - (0xF90B, 'M', '喇'), - (0xF90C, 'M', '奈'), - (0xF90D, 'M', '懶'), - (0xF90E, 'M', '癩'), - (0xF90F, 'M', '羅'), - (0xF910, 'M', '蘿'), - (0xF911, 'M', '螺'), - (0xF912, 'M', '裸'), - (0xF913, 'M', '邏'), - (0xF914, 'M', '樂'), - (0xF915, 'M', '洛'), - (0xF916, 'M', '烙'), - (0xF917, 'M', '珞'), - (0xF918, 'M', '落'), - (0xF919, 'M', '酪'), - (0xF91A, 'M', '駱'), - (0xF91B, 'M', '亂'), - (0xF91C, 'M', '卵'), - (0xF91D, 'M', '欄'), - (0xF91E, 'M', '爛'), - (0xF91F, 'M', '蘭'), - (0xF920, 'M', '鸞'), - (0xF921, 'M', '嵐'), - (0xF922, 'M', '濫'), - (0xF923, 'M', '藍'), - (0xF924, 'M', '襤'), - (0xF925, 'M', '拉'), - (0xF926, 'M', '臘'), - (0xF927, 'M', '蠟'), - (0xF928, 'M', '廊'), - (0xF929, 'M', '朗'), - (0xF92A, 'M', '浪'), - (0xF92B, 'M', '狼'), - (0xF92C, 'M', '郎'), - (0xF92D, 'M', '來'), - (0xF92E, 'M', '冷'), - (0xF92F, 'M', '勞'), - (0xF930, 'M', '擄'), - (0xF931, 'M', '櫓'), - (0xF932, 'M', '爐'), - (0xF933, 'M', '盧'), - (0xF934, 'M', '老'), - (0xF935, 'M', '蘆'), - (0xF936, 'M', '虜'), - (0xF937, 'M', '路'), - (0xF938, 'M', '露'), - (0xF939, 'M', '魯'), - (0xF93A, 'M', '鷺'), - (0xF93B, 'M', '碌'), - (0xF93C, 'M', '祿'), - (0xF93D, 'M', '綠'), - (0xF93E, 'M', '菉'), - (0xF93F, 'M', '錄'), - (0xF940, 'M', '鹿'), - (0xF941, 'M', '論'), - (0xF942, 'M', '壟'), - (0xF943, 'M', '弄'), - (0xF944, 'M', '籠'), - (0xF945, 'M', '聾'), - ] - -def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF946, 'M', '牢'), - (0xF947, 'M', '磊'), - (0xF948, 'M', '賂'), - (0xF949, 'M', '雷'), - (0xF94A, 'M', '壘'), - (0xF94B, 'M', '屢'), - (0xF94C, 'M', '樓'), - (0xF94D, 'M', '淚'), - (0xF94E, 'M', '漏'), - (0xF94F, 'M', '累'), - (0xF950, 'M', '縷'), - (0xF951, 'M', '陋'), - (0xF952, 'M', '勒'), - (0xF953, 'M', '肋'), - (0xF954, 'M', '凜'), - (0xF955, 'M', '凌'), - (0xF956, 'M', '稜'), - (0xF957, 'M', '綾'), - (0xF958, 'M', '菱'), - (0xF959, 'M', '陵'), - (0xF95A, 'M', '讀'), - (0xF95B, 'M', '拏'), - (0xF95C, 'M', '樂'), - (0xF95D, 'M', '諾'), - (0xF95E, 'M', '丹'), - (0xF95F, 'M', '寧'), - (0xF960, 'M', '怒'), - (0xF961, 'M', '率'), - (0xF962, 'M', '異'), - (0xF963, 'M', '北'), - (0xF964, 'M', '磻'), - (0xF965, 'M', '便'), - (0xF966, 'M', '復'), - (0xF967, 'M', '不'), - (0xF968, 'M', '泌'), - (0xF969, 'M', '數'), - (0xF96A, 'M', '索'), - (0xF96B, 'M', '參'), - (0xF96C, 'M', '塞'), - (0xF96D, 'M', '省'), - (0xF96E, 'M', '葉'), - (0xF96F, 'M', '說'), - (0xF970, 'M', '殺'), - (0xF971, 'M', '辰'), - (0xF972, 'M', '沈'), - (0xF973, 'M', '拾'), - (0xF974, 'M', '若'), - (0xF975, 'M', '掠'), - (0xF976, 'M', '略'), - (0xF977, 'M', '亮'), - (0xF978, 'M', '兩'), - (0xF979, 'M', '凉'), - (0xF97A, 'M', '梁'), - (0xF97B, 'M', '糧'), - (0xF97C, 'M', '良'), - (0xF97D, 'M', '諒'), - (0xF97E, 'M', '量'), - (0xF97F, 'M', '勵'), 
- (0xF980, 'M', '呂'), - (0xF981, 'M', '女'), - (0xF982, 'M', '廬'), - (0xF983, 'M', '旅'), - (0xF984, 'M', '濾'), - (0xF985, 'M', '礪'), - (0xF986, 'M', '閭'), - (0xF987, 'M', '驪'), - (0xF988, 'M', '麗'), - (0xF989, 'M', '黎'), - (0xF98A, 'M', '力'), - (0xF98B, 'M', '曆'), - (0xF98C, 'M', '歷'), - (0xF98D, 'M', '轢'), - (0xF98E, 'M', '年'), - (0xF98F, 'M', '憐'), - (0xF990, 'M', '戀'), - (0xF991, 'M', '撚'), - (0xF992, 'M', '漣'), - (0xF993, 'M', '煉'), - (0xF994, 'M', '璉'), - (0xF995, 'M', '秊'), - (0xF996, 'M', '練'), - (0xF997, 'M', '聯'), - (0xF998, 'M', '輦'), - (0xF999, 'M', '蓮'), - (0xF99A, 'M', '連'), - (0xF99B, 'M', '鍊'), - (0xF99C, 'M', '列'), - (0xF99D, 'M', '劣'), - (0xF99E, 'M', '咽'), - (0xF99F, 'M', '烈'), - (0xF9A0, 'M', '裂'), - (0xF9A1, 'M', '說'), - (0xF9A2, 'M', '廉'), - (0xF9A3, 'M', '念'), - (0xF9A4, 'M', '捻'), - (0xF9A5, 'M', '殮'), - (0xF9A6, 'M', '簾'), - (0xF9A7, 'M', '獵'), - (0xF9A8, 'M', '令'), - (0xF9A9, 'M', '囹'), - ] - -def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xF9AA, 'M', '寧'), - (0xF9AB, 'M', '嶺'), - (0xF9AC, 'M', '怜'), - (0xF9AD, 'M', '玲'), - (0xF9AE, 'M', '瑩'), - (0xF9AF, 'M', '羚'), - (0xF9B0, 'M', '聆'), - (0xF9B1, 'M', '鈴'), - (0xF9B2, 'M', '零'), - (0xF9B3, 'M', '靈'), - (0xF9B4, 'M', '領'), - (0xF9B5, 'M', '例'), - (0xF9B6, 'M', '禮'), - (0xF9B7, 'M', '醴'), - (0xF9B8, 'M', '隸'), - (0xF9B9, 'M', '惡'), - (0xF9BA, 'M', '了'), - (0xF9BB, 'M', '僚'), - (0xF9BC, 'M', '寮'), - (0xF9BD, 'M', '尿'), - (0xF9BE, 'M', '料'), - (0xF9BF, 'M', '樂'), - (0xF9C0, 'M', '燎'), - (0xF9C1, 'M', '療'), - (0xF9C2, 'M', '蓼'), - (0xF9C3, 'M', '遼'), - (0xF9C4, 'M', '龍'), - (0xF9C5, 'M', '暈'), - (0xF9C6, 'M', '阮'), - (0xF9C7, 'M', '劉'), - (0xF9C8, 'M', '杻'), - (0xF9C9, 'M', '柳'), - (0xF9CA, 'M', '流'), - (0xF9CB, 'M', '溜'), - (0xF9CC, 'M', '琉'), - (0xF9CD, 'M', '留'), - (0xF9CE, 'M', '硫'), - (0xF9CF, 'M', '紐'), - (0xF9D0, 'M', '類'), - (0xF9D1, 'M', '六'), - (0xF9D2, 'M', '戮'), - (0xF9D3, 'M', '陸'), - (0xF9D4, 'M', '倫'), - (0xF9D5, 'M', '崙'), - (0xF9D6, 'M', '淪'), - (0xF9D7, 'M', '輪'), - (0xF9D8, 'M', '律'), - (0xF9D9, 'M', '慄'), - (0xF9DA, 'M', '栗'), - (0xF9DB, 'M', '率'), - (0xF9DC, 'M', '隆'), - (0xF9DD, 'M', '利'), - (0xF9DE, 'M', '吏'), - (0xF9DF, 'M', '履'), - (0xF9E0, 'M', '易'), - (0xF9E1, 'M', '李'), - (0xF9E2, 'M', '梨'), - (0xF9E3, 'M', '泥'), - (0xF9E4, 'M', '理'), - (0xF9E5, 'M', '痢'), - (0xF9E6, 'M', '罹'), - (0xF9E7, 'M', '裏'), - (0xF9E8, 'M', '裡'), - (0xF9E9, 'M', '里'), - (0xF9EA, 'M', '離'), - (0xF9EB, 'M', '匿'), - (0xF9EC, 'M', '溺'), - (0xF9ED, 'M', '吝'), - (0xF9EE, 'M', '燐'), - (0xF9EF, 'M', '璘'), - (0xF9F0, 'M', '藺'), - (0xF9F1, 'M', '隣'), - (0xF9F2, 'M', '鱗'), - (0xF9F3, 'M', '麟'), - (0xF9F4, 'M', '林'), - (0xF9F5, 'M', '淋'), - (0xF9F6, 'M', '臨'), - (0xF9F7, 'M', '立'), - (0xF9F8, 'M', '笠'), - (0xF9F9, 'M', '粒'), - (0xF9FA, 'M', '狀'), - (0xF9FB, 'M', '炙'), - (0xF9FC, 'M', '識'), - (0xF9FD, 'M', '什'), - (0xF9FE, 'M', '茶'), - (0xF9FF, 'M', '刺'), - (0xFA00, 'M', '切'), - (0xFA01, 'M', '度'), - (0xFA02, 'M', '拓'), - (0xFA03, 'M', '糖'), - (0xFA04, 'M', '宅'), - (0xFA05, 'M', '洞'), - (0xFA06, 'M', '暴'), - (0xFA07, 'M', '輻'), - (0xFA08, 'M', '行'), - (0xFA09, 'M', '降'), - (0xFA0A, 'M', '見'), - (0xFA0B, 'M', '廓'), - (0xFA0C, 'M', '兀'), - (0xFA0D, 'M', '嗀'), - ] - -def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA0E, 'V'), - (0xFA10, 'M', '塚'), - (0xFA11, 'V'), - (0xFA12, 'M', '晴'), - (0xFA13, 'V'), - (0xFA15, 'M', '凞'), - (0xFA16, 'M', '猪'), - (0xFA17, 'M', '益'), - (0xFA18, 'M', '礼'), - (0xFA19, 'M', '神'), - (0xFA1A, 'M', '祥'), - (0xFA1B, 'M', '福'), - 
(0xFA1C, 'M', '靖'), - (0xFA1D, 'M', '精'), - (0xFA1E, 'M', '羽'), - (0xFA1F, 'V'), - (0xFA20, 'M', '蘒'), - (0xFA21, 'V'), - (0xFA22, 'M', '諸'), - (0xFA23, 'V'), - (0xFA25, 'M', '逸'), - (0xFA26, 'M', '都'), - (0xFA27, 'V'), - (0xFA2A, 'M', '飯'), - (0xFA2B, 'M', '飼'), - (0xFA2C, 'M', '館'), - (0xFA2D, 'M', '鶴'), - (0xFA2E, 'M', '郞'), - (0xFA2F, 'M', '隷'), - (0xFA30, 'M', '侮'), - (0xFA31, 'M', '僧'), - (0xFA32, 'M', '免'), - (0xFA33, 'M', '勉'), - (0xFA34, 'M', '勤'), - (0xFA35, 'M', '卑'), - (0xFA36, 'M', '喝'), - (0xFA37, 'M', '嘆'), - (0xFA38, 'M', '器'), - (0xFA39, 'M', '塀'), - (0xFA3A, 'M', '墨'), - (0xFA3B, 'M', '層'), - (0xFA3C, 'M', '屮'), - (0xFA3D, 'M', '悔'), - (0xFA3E, 'M', '慨'), - (0xFA3F, 'M', '憎'), - (0xFA40, 'M', '懲'), - (0xFA41, 'M', '敏'), - (0xFA42, 'M', '既'), - (0xFA43, 'M', '暑'), - (0xFA44, 'M', '梅'), - (0xFA45, 'M', '海'), - (0xFA46, 'M', '渚'), - (0xFA47, 'M', '漢'), - (0xFA48, 'M', '煮'), - (0xFA49, 'M', '爫'), - (0xFA4A, 'M', '琢'), - (0xFA4B, 'M', '碑'), - (0xFA4C, 'M', '社'), - (0xFA4D, 'M', '祉'), - (0xFA4E, 'M', '祈'), - (0xFA4F, 'M', '祐'), - (0xFA50, 'M', '祖'), - (0xFA51, 'M', '祝'), - (0xFA52, 'M', '禍'), - (0xFA53, 'M', '禎'), - (0xFA54, 'M', '穀'), - (0xFA55, 'M', '突'), - (0xFA56, 'M', '節'), - (0xFA57, 'M', '練'), - (0xFA58, 'M', '縉'), - (0xFA59, 'M', '繁'), - (0xFA5A, 'M', '署'), - (0xFA5B, 'M', '者'), - (0xFA5C, 'M', '臭'), - (0xFA5D, 'M', '艹'), - (0xFA5F, 'M', '著'), - (0xFA60, 'M', '褐'), - (0xFA61, 'M', '視'), - (0xFA62, 'M', '謁'), - (0xFA63, 'M', '謹'), - (0xFA64, 'M', '賓'), - (0xFA65, 'M', '贈'), - (0xFA66, 'M', '辶'), - (0xFA67, 'M', '逸'), - (0xFA68, 'M', '難'), - (0xFA69, 'M', '響'), - (0xFA6A, 'M', '頻'), - (0xFA6B, 'M', '恵'), - (0xFA6C, 'M', '𤋮'), - (0xFA6D, 'M', '舘'), - (0xFA6E, 'X'), - (0xFA70, 'M', '並'), - (0xFA71, 'M', '况'), - (0xFA72, 'M', '全'), - (0xFA73, 'M', '侀'), - (0xFA74, 'M', '充'), - (0xFA75, 'M', '冀'), - (0xFA76, 'M', '勇'), - (0xFA77, 'M', '勺'), - (0xFA78, 'M', '喝'), - ] - -def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFA79, 'M', '啕'), - (0xFA7A, 'M', '喙'), - (0xFA7B, 'M', '嗢'), - (0xFA7C, 'M', '塚'), - (0xFA7D, 'M', '墳'), - (0xFA7E, 'M', '奄'), - (0xFA7F, 'M', '奔'), - (0xFA80, 'M', '婢'), - (0xFA81, 'M', '嬨'), - (0xFA82, 'M', '廒'), - (0xFA83, 'M', '廙'), - (0xFA84, 'M', '彩'), - (0xFA85, 'M', '徭'), - (0xFA86, 'M', '惘'), - (0xFA87, 'M', '慎'), - (0xFA88, 'M', '愈'), - (0xFA89, 'M', '憎'), - (0xFA8A, 'M', '慠'), - (0xFA8B, 'M', '懲'), - (0xFA8C, 'M', '戴'), - (0xFA8D, 'M', '揄'), - (0xFA8E, 'M', '搜'), - (0xFA8F, 'M', '摒'), - (0xFA90, 'M', '敖'), - (0xFA91, 'M', '晴'), - (0xFA92, 'M', '朗'), - (0xFA93, 'M', '望'), - (0xFA94, 'M', '杖'), - (0xFA95, 'M', '歹'), - (0xFA96, 'M', '殺'), - (0xFA97, 'M', '流'), - (0xFA98, 'M', '滛'), - (0xFA99, 'M', '滋'), - (0xFA9A, 'M', '漢'), - (0xFA9B, 'M', '瀞'), - (0xFA9C, 'M', '煮'), - (0xFA9D, 'M', '瞧'), - (0xFA9E, 'M', '爵'), - (0xFA9F, 'M', '犯'), - (0xFAA0, 'M', '猪'), - (0xFAA1, 'M', '瑱'), - (0xFAA2, 'M', '甆'), - (0xFAA3, 'M', '画'), - (0xFAA4, 'M', '瘝'), - (0xFAA5, 'M', '瘟'), - (0xFAA6, 'M', '益'), - (0xFAA7, 'M', '盛'), - (0xFAA8, 'M', '直'), - (0xFAA9, 'M', '睊'), - (0xFAAA, 'M', '着'), - (0xFAAB, 'M', '磌'), - (0xFAAC, 'M', '窱'), - (0xFAAD, 'M', '節'), - (0xFAAE, 'M', '类'), - (0xFAAF, 'M', '絛'), - (0xFAB0, 'M', '練'), - (0xFAB1, 'M', '缾'), - (0xFAB2, 'M', '者'), - (0xFAB3, 'M', '荒'), - (0xFAB4, 'M', '華'), - (0xFAB5, 'M', '蝹'), - (0xFAB6, 'M', '襁'), - (0xFAB7, 'M', '覆'), - (0xFAB8, 'M', '視'), - (0xFAB9, 'M', '調'), - (0xFABA, 'M', '諸'), - (0xFABB, 'M', '請'), - (0xFABC, 'M', '謁'), - (0xFABD, 'M', '諾'), - (0xFABE, 'M', '諭'), - (0xFABF, 'M', 
'謹'), - (0xFAC0, 'M', '變'), - (0xFAC1, 'M', '贈'), - (0xFAC2, 'M', '輸'), - (0xFAC3, 'M', '遲'), - (0xFAC4, 'M', '醙'), - (0xFAC5, 'M', '鉶'), - (0xFAC6, 'M', '陼'), - (0xFAC7, 'M', '難'), - (0xFAC8, 'M', '靖'), - (0xFAC9, 'M', '韛'), - (0xFACA, 'M', '響'), - (0xFACB, 'M', '頋'), - (0xFACC, 'M', '頻'), - (0xFACD, 'M', '鬒'), - (0xFACE, 'M', '龜'), - (0xFACF, 'M', '𢡊'), - (0xFAD0, 'M', '𢡄'), - (0xFAD1, 'M', '𣏕'), - (0xFAD2, 'M', '㮝'), - (0xFAD3, 'M', '䀘'), - (0xFAD4, 'M', '䀹'), - (0xFAD5, 'M', '𥉉'), - (0xFAD6, 'M', '𥳐'), - (0xFAD7, 'M', '𧻓'), - (0xFAD8, 'M', '齃'), - (0xFAD9, 'M', '龎'), - (0xFADA, 'X'), - (0xFB00, 'M', 'ff'), - (0xFB01, 'M', 'fi'), - ] - -def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFB02, 'M', 'fl'), - (0xFB03, 'M', 'ffi'), - (0xFB04, 'M', 'ffl'), - (0xFB05, 'M', 'st'), - (0xFB07, 'X'), - (0xFB13, 'M', 'մն'), - (0xFB14, 'M', 'մե'), - (0xFB15, 'M', 'մի'), - (0xFB16, 'M', 'վն'), - (0xFB17, 'M', 'մխ'), - (0xFB18, 'X'), - (0xFB1D, 'M', 'יִ'), - (0xFB1E, 'V'), - (0xFB1F, 'M', 'ײַ'), - (0xFB20, 'M', 'ע'), - (0xFB21, 'M', 'א'), - (0xFB22, 'M', 'ד'), - (0xFB23, 'M', 'ה'), - (0xFB24, 'M', 'כ'), - (0xFB25, 'M', 'ל'), - (0xFB26, 'M', 'ם'), - (0xFB27, 'M', 'ר'), - (0xFB28, 'M', 'ת'), - (0xFB29, '3', '+'), - (0xFB2A, 'M', 'שׁ'), - (0xFB2B, 'M', 'שׂ'), - (0xFB2C, 'M', 'שּׁ'), - (0xFB2D, 'M', 'שּׂ'), - (0xFB2E, 'M', 'אַ'), - (0xFB2F, 'M', 'אָ'), - (0xFB30, 'M', 'אּ'), - (0xFB31, 'M', 'בּ'), - (0xFB32, 'M', 'גּ'), - (0xFB33, 'M', 'דּ'), - (0xFB34, 'M', 'הּ'), - (0xFB35, 'M', 'וּ'), - (0xFB36, 'M', 'זּ'), - (0xFB37, 'X'), - (0xFB38, 'M', 'טּ'), - (0xFB39, 'M', 'יּ'), - (0xFB3A, 'M', 'ךּ'), - (0xFB3B, 'M', 'כּ'), - (0xFB3C, 'M', 'לּ'), - (0xFB3D, 'X'), - (0xFB3E, 'M', 'מּ'), - (0xFB3F, 'X'), - (0xFB40, 'M', 'נּ'), - (0xFB41, 'M', 'סּ'), - (0xFB42, 'X'), - (0xFB43, 'M', 'ףּ'), - (0xFB44, 'M', 'פּ'), - (0xFB45, 'X'), - (0xFB46, 'M', 'צּ'), - (0xFB47, 'M', 'קּ'), - (0xFB48, 'M', 'רּ'), - (0xFB49, 'M', 'שּ'), - (0xFB4A, 'M', 'תּ'), - (0xFB4B, 'M', 'וֹ'), - (0xFB4C, 'M', 'בֿ'), - (0xFB4D, 'M', 'כֿ'), - (0xFB4E, 'M', 'פֿ'), - (0xFB4F, 'M', 'אל'), - (0xFB50, 'M', 'ٱ'), - (0xFB52, 'M', 'ٻ'), - (0xFB56, 'M', 'پ'), - (0xFB5A, 'M', 'ڀ'), - (0xFB5E, 'M', 'ٺ'), - (0xFB62, 'M', 'ٿ'), - (0xFB66, 'M', 'ٹ'), - (0xFB6A, 'M', 'ڤ'), - (0xFB6E, 'M', 'ڦ'), - (0xFB72, 'M', 'ڄ'), - (0xFB76, 'M', 'ڃ'), - (0xFB7A, 'M', 'چ'), - (0xFB7E, 'M', 'ڇ'), - (0xFB82, 'M', 'ڍ'), - (0xFB84, 'M', 'ڌ'), - (0xFB86, 'M', 'ڎ'), - (0xFB88, 'M', 'ڈ'), - (0xFB8A, 'M', 'ژ'), - (0xFB8C, 'M', 'ڑ'), - (0xFB8E, 'M', 'ک'), - (0xFB92, 'M', 'گ'), - (0xFB96, 'M', 'ڳ'), - (0xFB9A, 'M', 'ڱ'), - (0xFB9E, 'M', 'ں'), - (0xFBA0, 'M', 'ڻ'), - (0xFBA4, 'M', 'ۀ'), - (0xFBA6, 'M', 'ہ'), - (0xFBAA, 'M', 'ھ'), - (0xFBAE, 'M', 'ے'), - (0xFBB0, 'M', 'ۓ'), - (0xFBB2, 'V'), - (0xFBC3, 'X'), - (0xFBD3, 'M', 'ڭ'), - (0xFBD7, 'M', 'ۇ'), - (0xFBD9, 'M', 'ۆ'), - (0xFBDB, 'M', 'ۈ'), - (0xFBDD, 'M', 'ۇٴ'), - (0xFBDE, 'M', 'ۋ'), - ] - -def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFBE0, 'M', 'ۅ'), - (0xFBE2, 'M', 'ۉ'), - (0xFBE4, 'M', 'ې'), - (0xFBE8, 'M', 'ى'), - (0xFBEA, 'M', 'ئا'), - (0xFBEC, 'M', 'ئە'), - (0xFBEE, 'M', 'ئو'), - (0xFBF0, 'M', 'ئۇ'), - (0xFBF2, 'M', 'ئۆ'), - (0xFBF4, 'M', 'ئۈ'), - (0xFBF6, 'M', 'ئې'), - (0xFBF9, 'M', 'ئى'), - (0xFBFC, 'M', 'ی'), - (0xFC00, 'M', 'ئج'), - (0xFC01, 'M', 'ئح'), - (0xFC02, 'M', 'ئم'), - (0xFC03, 'M', 'ئى'), - (0xFC04, 'M', 'ئي'), - (0xFC05, 'M', 'بج'), - (0xFC06, 'M', 'بح'), - (0xFC07, 'M', 'بخ'), - (0xFC08, 'M', 'بم'), - (0xFC09, 'M', 'بى'), - (0xFC0A, 'M', 
'بي'), - (0xFC0B, 'M', 'تج'), - (0xFC0C, 'M', 'تح'), - (0xFC0D, 'M', 'تخ'), - (0xFC0E, 'M', 'تم'), - (0xFC0F, 'M', 'تى'), - (0xFC10, 'M', 'تي'), - (0xFC11, 'M', 'ثج'), - (0xFC12, 'M', 'ثم'), - (0xFC13, 'M', 'ثى'), - (0xFC14, 'M', 'ثي'), - (0xFC15, 'M', 'جح'), - (0xFC16, 'M', 'جم'), - (0xFC17, 'M', 'حج'), - (0xFC18, 'M', 'حم'), - (0xFC19, 'M', 'خج'), - (0xFC1A, 'M', 'خح'), - (0xFC1B, 'M', 'خم'), - (0xFC1C, 'M', 'سج'), - (0xFC1D, 'M', 'سح'), - (0xFC1E, 'M', 'سخ'), - (0xFC1F, 'M', 'سم'), - (0xFC20, 'M', 'صح'), - (0xFC21, 'M', 'صم'), - (0xFC22, 'M', 'ضج'), - (0xFC23, 'M', 'ضح'), - (0xFC24, 'M', 'ضخ'), - (0xFC25, 'M', 'ضم'), - (0xFC26, 'M', 'طح'), - (0xFC27, 'M', 'طم'), - (0xFC28, 'M', 'ظم'), - (0xFC29, 'M', 'عج'), - (0xFC2A, 'M', 'عم'), - (0xFC2B, 'M', 'غج'), - (0xFC2C, 'M', 'غم'), - (0xFC2D, 'M', 'فج'), - (0xFC2E, 'M', 'فح'), - (0xFC2F, 'M', 'فخ'), - (0xFC30, 'M', 'فم'), - (0xFC31, 'M', 'فى'), - (0xFC32, 'M', 'في'), - (0xFC33, 'M', 'قح'), - (0xFC34, 'M', 'قم'), - (0xFC35, 'M', 'قى'), - (0xFC36, 'M', 'قي'), - (0xFC37, 'M', 'كا'), - (0xFC38, 'M', 'كج'), - (0xFC39, 'M', 'كح'), - (0xFC3A, 'M', 'كخ'), - (0xFC3B, 'M', 'كل'), - (0xFC3C, 'M', 'كم'), - (0xFC3D, 'M', 'كى'), - (0xFC3E, 'M', 'كي'), - (0xFC3F, 'M', 'لج'), - (0xFC40, 'M', 'لح'), - (0xFC41, 'M', 'لخ'), - (0xFC42, 'M', 'لم'), - (0xFC43, 'M', 'لى'), - (0xFC44, 'M', 'لي'), - (0xFC45, 'M', 'مج'), - (0xFC46, 'M', 'مح'), - (0xFC47, 'M', 'مخ'), - (0xFC48, 'M', 'مم'), - (0xFC49, 'M', 'مى'), - (0xFC4A, 'M', 'مي'), - (0xFC4B, 'M', 'نج'), - (0xFC4C, 'M', 'نح'), - (0xFC4D, 'M', 'نخ'), - (0xFC4E, 'M', 'نم'), - (0xFC4F, 'M', 'نى'), - (0xFC50, 'M', 'ني'), - (0xFC51, 'M', 'هج'), - (0xFC52, 'M', 'هم'), - (0xFC53, 'M', 'هى'), - (0xFC54, 'M', 'هي'), - (0xFC55, 'M', 'يج'), - (0xFC56, 'M', 'يح'), - ] - -def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFC57, 'M', 'يخ'), - (0xFC58, 'M', 'يم'), - (0xFC59, 'M', 'يى'), - (0xFC5A, 'M', 'يي'), - (0xFC5B, 'M', 'ذٰ'), - (0xFC5C, 'M', 'رٰ'), - (0xFC5D, 'M', 'ىٰ'), - (0xFC5E, '3', ' ٌّ'), - (0xFC5F, '3', ' ٍّ'), - (0xFC60, '3', ' َّ'), - (0xFC61, '3', ' ُّ'), - (0xFC62, '3', ' ِّ'), - (0xFC63, '3', ' ّٰ'), - (0xFC64, 'M', 'ئر'), - (0xFC65, 'M', 'ئز'), - (0xFC66, 'M', 'ئم'), - (0xFC67, 'M', 'ئن'), - (0xFC68, 'M', 'ئى'), - (0xFC69, 'M', 'ئي'), - (0xFC6A, 'M', 'بر'), - (0xFC6B, 'M', 'بز'), - (0xFC6C, 'M', 'بم'), - (0xFC6D, 'M', 'بن'), - (0xFC6E, 'M', 'بى'), - (0xFC6F, 'M', 'بي'), - (0xFC70, 'M', 'تر'), - (0xFC71, 'M', 'تز'), - (0xFC72, 'M', 'تم'), - (0xFC73, 'M', 'تن'), - (0xFC74, 'M', 'تى'), - (0xFC75, 'M', 'تي'), - (0xFC76, 'M', 'ثر'), - (0xFC77, 'M', 'ثز'), - (0xFC78, 'M', 'ثم'), - (0xFC79, 'M', 'ثن'), - (0xFC7A, 'M', 'ثى'), - (0xFC7B, 'M', 'ثي'), - (0xFC7C, 'M', 'فى'), - (0xFC7D, 'M', 'في'), - (0xFC7E, 'M', 'قى'), - (0xFC7F, 'M', 'قي'), - (0xFC80, 'M', 'كا'), - (0xFC81, 'M', 'كل'), - (0xFC82, 'M', 'كم'), - (0xFC83, 'M', 'كى'), - (0xFC84, 'M', 'كي'), - (0xFC85, 'M', 'لم'), - (0xFC86, 'M', 'لى'), - (0xFC87, 'M', 'لي'), - (0xFC88, 'M', 'ما'), - (0xFC89, 'M', 'مم'), - (0xFC8A, 'M', 'نر'), - (0xFC8B, 'M', 'نز'), - (0xFC8C, 'M', 'نم'), - (0xFC8D, 'M', 'نن'), - (0xFC8E, 'M', 'نى'), - (0xFC8F, 'M', 'ني'), - (0xFC90, 'M', 'ىٰ'), - (0xFC91, 'M', 'ير'), - (0xFC92, 'M', 'يز'), - (0xFC93, 'M', 'يم'), - (0xFC94, 'M', 'ين'), - (0xFC95, 'M', 'يى'), - (0xFC96, 'M', 'يي'), - (0xFC97, 'M', 'ئج'), - (0xFC98, 'M', 'ئح'), - (0xFC99, 'M', 'ئخ'), - (0xFC9A, 'M', 'ئم'), - (0xFC9B, 'M', 'ئه'), - (0xFC9C, 'M', 'بج'), - (0xFC9D, 'M', 'بح'), - (0xFC9E, 'M', 'بخ'), - (0xFC9F, 'M', 'بم'), - (0xFCA0, 'M', 'به'), - 
(0xFCA1, 'M', 'تج'), - (0xFCA2, 'M', 'تح'), - (0xFCA3, 'M', 'تخ'), - (0xFCA4, 'M', 'تم'), - (0xFCA5, 'M', 'ته'), - (0xFCA6, 'M', 'ثم'), - (0xFCA7, 'M', 'جح'), - (0xFCA8, 'M', 'جم'), - (0xFCA9, 'M', 'حج'), - (0xFCAA, 'M', 'حم'), - (0xFCAB, 'M', 'خج'), - (0xFCAC, 'M', 'خم'), - (0xFCAD, 'M', 'سج'), - (0xFCAE, 'M', 'سح'), - (0xFCAF, 'M', 'سخ'), - (0xFCB0, 'M', 'سم'), - (0xFCB1, 'M', 'صح'), - (0xFCB2, 'M', 'صخ'), - (0xFCB3, 'M', 'صم'), - (0xFCB4, 'M', 'ضج'), - (0xFCB5, 'M', 'ضح'), - (0xFCB6, 'M', 'ضخ'), - (0xFCB7, 'M', 'ضم'), - (0xFCB8, 'M', 'طح'), - (0xFCB9, 'M', 'ظم'), - (0xFCBA, 'M', 'عج'), - ] - -def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFCBB, 'M', 'عم'), - (0xFCBC, 'M', 'غج'), - (0xFCBD, 'M', 'غم'), - (0xFCBE, 'M', 'فج'), - (0xFCBF, 'M', 'فح'), - (0xFCC0, 'M', 'فخ'), - (0xFCC1, 'M', 'فم'), - (0xFCC2, 'M', 'قح'), - (0xFCC3, 'M', 'قم'), - (0xFCC4, 'M', 'كج'), - (0xFCC5, 'M', 'كح'), - (0xFCC6, 'M', 'كخ'), - (0xFCC7, 'M', 'كل'), - (0xFCC8, 'M', 'كم'), - (0xFCC9, 'M', 'لج'), - (0xFCCA, 'M', 'لح'), - (0xFCCB, 'M', 'لخ'), - (0xFCCC, 'M', 'لم'), - (0xFCCD, 'M', 'له'), - (0xFCCE, 'M', 'مج'), - (0xFCCF, 'M', 'مح'), - (0xFCD0, 'M', 'مخ'), - (0xFCD1, 'M', 'مم'), - (0xFCD2, 'M', 'نج'), - (0xFCD3, 'M', 'نح'), - (0xFCD4, 'M', 'نخ'), - (0xFCD5, 'M', 'نم'), - (0xFCD6, 'M', 'نه'), - (0xFCD7, 'M', 'هج'), - (0xFCD8, 'M', 'هم'), - (0xFCD9, 'M', 'هٰ'), - (0xFCDA, 'M', 'يج'), - (0xFCDB, 'M', 'يح'), - (0xFCDC, 'M', 'يخ'), - (0xFCDD, 'M', 'يم'), - (0xFCDE, 'M', 'يه'), - (0xFCDF, 'M', 'ئم'), - (0xFCE0, 'M', 'ئه'), - (0xFCE1, 'M', 'بم'), - (0xFCE2, 'M', 'به'), - (0xFCE3, 'M', 'تم'), - (0xFCE4, 'M', 'ته'), - (0xFCE5, 'M', 'ثم'), - (0xFCE6, 'M', 'ثه'), - (0xFCE7, 'M', 'سم'), - (0xFCE8, 'M', 'سه'), - (0xFCE9, 'M', 'شم'), - (0xFCEA, 'M', 'شه'), - (0xFCEB, 'M', 'كل'), - (0xFCEC, 'M', 'كم'), - (0xFCED, 'M', 'لم'), - (0xFCEE, 'M', 'نم'), - (0xFCEF, 'M', 'نه'), - (0xFCF0, 'M', 'يم'), - (0xFCF1, 'M', 'يه'), - (0xFCF2, 'M', 'ـَّ'), - (0xFCF3, 'M', 'ـُّ'), - (0xFCF4, 'M', 'ـِّ'), - (0xFCF5, 'M', 'طى'), - (0xFCF6, 'M', 'طي'), - (0xFCF7, 'M', 'عى'), - (0xFCF8, 'M', 'عي'), - (0xFCF9, 'M', 'غى'), - (0xFCFA, 'M', 'غي'), - (0xFCFB, 'M', 'سى'), - (0xFCFC, 'M', 'سي'), - (0xFCFD, 'M', 'شى'), - (0xFCFE, 'M', 'شي'), - (0xFCFF, 'M', 'حى'), - (0xFD00, 'M', 'حي'), - (0xFD01, 'M', 'جى'), - (0xFD02, 'M', 'جي'), - (0xFD03, 'M', 'خى'), - (0xFD04, 'M', 'خي'), - (0xFD05, 'M', 'صى'), - (0xFD06, 'M', 'صي'), - (0xFD07, 'M', 'ضى'), - (0xFD08, 'M', 'ضي'), - (0xFD09, 'M', 'شج'), - (0xFD0A, 'M', 'شح'), - (0xFD0B, 'M', 'شخ'), - (0xFD0C, 'M', 'شم'), - (0xFD0D, 'M', 'شر'), - (0xFD0E, 'M', 'سر'), - (0xFD0F, 'M', 'صر'), - (0xFD10, 'M', 'ضر'), - (0xFD11, 'M', 'طى'), - (0xFD12, 'M', 'طي'), - (0xFD13, 'M', 'عى'), - (0xFD14, 'M', 'عي'), - (0xFD15, 'M', 'غى'), - (0xFD16, 'M', 'غي'), - (0xFD17, 'M', 'سى'), - (0xFD18, 'M', 'سي'), - (0xFD19, 'M', 'شى'), - (0xFD1A, 'M', 'شي'), - (0xFD1B, 'M', 'حى'), - (0xFD1C, 'M', 'حي'), - (0xFD1D, 'M', 'جى'), - (0xFD1E, 'M', 'جي'), - ] - -def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFD1F, 'M', 'خى'), - (0xFD20, 'M', 'خي'), - (0xFD21, 'M', 'صى'), - (0xFD22, 'M', 'صي'), - (0xFD23, 'M', 'ضى'), - (0xFD24, 'M', 'ضي'), - (0xFD25, 'M', 'شج'), - (0xFD26, 'M', 'شح'), - (0xFD27, 'M', 'شخ'), - (0xFD28, 'M', 'شم'), - (0xFD29, 'M', 'شر'), - (0xFD2A, 'M', 'سر'), - (0xFD2B, 'M', 'صر'), - (0xFD2C, 'M', 'ضر'), - (0xFD2D, 'M', 'شج'), - (0xFD2E, 'M', 'شح'), - (0xFD2F, 'M', 'شخ'), - (0xFD30, 'M', 'شم'), - (0xFD31, 'M', 'سه'), - (0xFD32, 'M', 'شه'), - (0xFD33, 'M', 
'طم'), - (0xFD34, 'M', 'سج'), - (0xFD35, 'M', 'سح'), - (0xFD36, 'M', 'سخ'), - (0xFD37, 'M', 'شج'), - (0xFD38, 'M', 'شح'), - (0xFD39, 'M', 'شخ'), - (0xFD3A, 'M', 'طم'), - (0xFD3B, 'M', 'ظم'), - (0xFD3C, 'M', 'اً'), - (0xFD3E, 'V'), - (0xFD50, 'M', 'تجم'), - (0xFD51, 'M', 'تحج'), - (0xFD53, 'M', 'تحم'), - (0xFD54, 'M', 'تخم'), - (0xFD55, 'M', 'تمج'), - (0xFD56, 'M', 'تمح'), - (0xFD57, 'M', 'تمخ'), - (0xFD58, 'M', 'جمح'), - (0xFD5A, 'M', 'حمي'), - (0xFD5B, 'M', 'حمى'), - (0xFD5C, 'M', 'سحج'), - (0xFD5D, 'M', 'سجح'), - (0xFD5E, 'M', 'سجى'), - (0xFD5F, 'M', 'سمح'), - (0xFD61, 'M', 'سمج'), - (0xFD62, 'M', 'سمم'), - (0xFD64, 'M', 'صحح'), - (0xFD66, 'M', 'صمم'), - (0xFD67, 'M', 'شحم'), - (0xFD69, 'M', 'شجي'), - (0xFD6A, 'M', 'شمخ'), - (0xFD6C, 'M', 'شمم'), - (0xFD6E, 'M', 'ضحى'), - (0xFD6F, 'M', 'ضخم'), - (0xFD71, 'M', 'طمح'), - (0xFD73, 'M', 'طمم'), - (0xFD74, 'M', 'طمي'), - (0xFD75, 'M', 'عجم'), - (0xFD76, 'M', 'عمم'), - (0xFD78, 'M', 'عمى'), - (0xFD79, 'M', 'غمم'), - (0xFD7A, 'M', 'غمي'), - (0xFD7B, 'M', 'غمى'), - (0xFD7C, 'M', 'فخم'), - (0xFD7E, 'M', 'قمح'), - (0xFD7F, 'M', 'قمم'), - (0xFD80, 'M', 'لحم'), - (0xFD81, 'M', 'لحي'), - (0xFD82, 'M', 'لحى'), - (0xFD83, 'M', 'لجج'), - (0xFD85, 'M', 'لخم'), - (0xFD87, 'M', 'لمح'), - (0xFD89, 'M', 'محج'), - (0xFD8A, 'M', 'محم'), - (0xFD8B, 'M', 'محي'), - (0xFD8C, 'M', 'مجح'), - (0xFD8D, 'M', 'مجم'), - (0xFD8E, 'M', 'مخج'), - (0xFD8F, 'M', 'مخم'), - (0xFD90, 'X'), - (0xFD92, 'M', 'مجخ'), - (0xFD93, 'M', 'همج'), - (0xFD94, 'M', 'همم'), - (0xFD95, 'M', 'نحم'), - (0xFD96, 'M', 'نحى'), - (0xFD97, 'M', 'نجم'), - (0xFD99, 'M', 'نجى'), - (0xFD9A, 'M', 'نمي'), - (0xFD9B, 'M', 'نمى'), - (0xFD9C, 'M', 'يمم'), - (0xFD9E, 'M', 'بخي'), - (0xFD9F, 'M', 'تجي'), - (0xFDA0, 'M', 'تجى'), - (0xFDA1, 'M', 'تخي'), - (0xFDA2, 'M', 'تخى'), - (0xFDA3, 'M', 'تمي'), - (0xFDA4, 'M', 'تمى'), - (0xFDA5, 'M', 'جمي'), - (0xFDA6, 'M', 'جحى'), - ] - -def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFDA7, 'M', 'جمى'), - (0xFDA8, 'M', 'سخى'), - (0xFDA9, 'M', 'صحي'), - (0xFDAA, 'M', 'شحي'), - (0xFDAB, 'M', 'ضحي'), - (0xFDAC, 'M', 'لجي'), - (0xFDAD, 'M', 'لمي'), - (0xFDAE, 'M', 'يحي'), - (0xFDAF, 'M', 'يجي'), - (0xFDB0, 'M', 'يمي'), - (0xFDB1, 'M', 'ممي'), - (0xFDB2, 'M', 'قمي'), - (0xFDB3, 'M', 'نحي'), - (0xFDB4, 'M', 'قمح'), - (0xFDB5, 'M', 'لحم'), - (0xFDB6, 'M', 'عمي'), - (0xFDB7, 'M', 'كمي'), - (0xFDB8, 'M', 'نجح'), - (0xFDB9, 'M', 'مخي'), - (0xFDBA, 'M', 'لجم'), - (0xFDBB, 'M', 'كمم'), - (0xFDBC, 'M', 'لجم'), - (0xFDBD, 'M', 'نجح'), - (0xFDBE, 'M', 'جحي'), - (0xFDBF, 'M', 'حجي'), - (0xFDC0, 'M', 'مجي'), - (0xFDC1, 'M', 'فمي'), - (0xFDC2, 'M', 'بحي'), - (0xFDC3, 'M', 'كمم'), - (0xFDC4, 'M', 'عجم'), - (0xFDC5, 'M', 'صمم'), - (0xFDC6, 'M', 'سخي'), - (0xFDC7, 'M', 'نجي'), - (0xFDC8, 'X'), - (0xFDCF, 'V'), - (0xFDD0, 'X'), - (0xFDF0, 'M', 'صلے'), - (0xFDF1, 'M', 'قلے'), - (0xFDF2, 'M', 'الله'), - (0xFDF3, 'M', 'اكبر'), - (0xFDF4, 'M', 'محمد'), - (0xFDF5, 'M', 'صلعم'), - (0xFDF6, 'M', 'رسول'), - (0xFDF7, 'M', 'عليه'), - (0xFDF8, 'M', 'وسلم'), - (0xFDF9, 'M', 'صلى'), - (0xFDFA, '3', 'صلى الله عليه وسلم'), - (0xFDFB, '3', 'جل جلاله'), - (0xFDFC, 'M', 'ریال'), - (0xFDFD, 'V'), - (0xFE00, 'I'), - (0xFE10, '3', ','), - (0xFE11, 'M', '、'), - (0xFE12, 'X'), - (0xFE13, '3', ':'), - (0xFE14, '3', ';'), - (0xFE15, '3', '!'), - (0xFE16, '3', '?'), - (0xFE17, 'M', '〖'), - (0xFE18, 'M', '〗'), - (0xFE19, 'X'), - (0xFE20, 'V'), - (0xFE30, 'X'), - (0xFE31, 'M', '—'), - (0xFE32, 'M', '–'), - (0xFE33, '3', '_'), - (0xFE35, '3', '('), - (0xFE36, '3', ')'), - (0xFE37, '3', 
'{'), - (0xFE38, '3', '}'), - (0xFE39, 'M', '〔'), - (0xFE3A, 'M', '〕'), - (0xFE3B, 'M', '【'), - (0xFE3C, 'M', '】'), - (0xFE3D, 'M', '《'), - (0xFE3E, 'M', '》'), - (0xFE3F, 'M', '〈'), - (0xFE40, 'M', '〉'), - (0xFE41, 'M', '「'), - (0xFE42, 'M', '」'), - (0xFE43, 'M', '『'), - (0xFE44, 'M', '』'), - (0xFE45, 'V'), - (0xFE47, '3', '['), - (0xFE48, '3', ']'), - (0xFE49, '3', ' ̅'), - (0xFE4D, '3', '_'), - (0xFE50, '3', ','), - (0xFE51, 'M', '、'), - (0xFE52, 'X'), - (0xFE54, '3', ';'), - (0xFE55, '3', ':'), - (0xFE56, '3', '?'), - (0xFE57, '3', '!'), - (0xFE58, 'M', '—'), - (0xFE59, '3', '('), - (0xFE5A, '3', ')'), - (0xFE5B, '3', '{'), - (0xFE5C, '3', '}'), - (0xFE5D, 'M', '〔'), - ] - -def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFE5E, 'M', '〕'), - (0xFE5F, '3', '#'), - (0xFE60, '3', '&'), - (0xFE61, '3', '*'), - (0xFE62, '3', '+'), - (0xFE63, 'M', '-'), - (0xFE64, '3', '<'), - (0xFE65, '3', '>'), - (0xFE66, '3', '='), - (0xFE67, 'X'), - (0xFE68, '3', '\\'), - (0xFE69, '3', '$'), - (0xFE6A, '3', '%'), - (0xFE6B, '3', '@'), - (0xFE6C, 'X'), - (0xFE70, '3', ' ً'), - (0xFE71, 'M', 'ـً'), - (0xFE72, '3', ' ٌ'), - (0xFE73, 'V'), - (0xFE74, '3', ' ٍ'), - (0xFE75, 'X'), - (0xFE76, '3', ' َ'), - (0xFE77, 'M', 'ـَ'), - (0xFE78, '3', ' ُ'), - (0xFE79, 'M', 'ـُ'), - (0xFE7A, '3', ' ِ'), - (0xFE7B, 'M', 'ـِ'), - (0xFE7C, '3', ' ّ'), - (0xFE7D, 'M', 'ـّ'), - (0xFE7E, '3', ' ْ'), - (0xFE7F, 'M', 'ـْ'), - (0xFE80, 'M', 'ء'), - (0xFE81, 'M', 'آ'), - (0xFE83, 'M', 'أ'), - (0xFE85, 'M', 'ؤ'), - (0xFE87, 'M', 'إ'), - (0xFE89, 'M', 'ئ'), - (0xFE8D, 'M', 'ا'), - (0xFE8F, 'M', 'ب'), - (0xFE93, 'M', 'ة'), - (0xFE95, 'M', 'ت'), - (0xFE99, 'M', 'ث'), - (0xFE9D, 'M', 'ج'), - (0xFEA1, 'M', 'ح'), - (0xFEA5, 'M', 'خ'), - (0xFEA9, 'M', 'د'), - (0xFEAB, 'M', 'ذ'), - (0xFEAD, 'M', 'ر'), - (0xFEAF, 'M', 'ز'), - (0xFEB1, 'M', 'س'), - (0xFEB5, 'M', 'ش'), - (0xFEB9, 'M', 'ص'), - (0xFEBD, 'M', 'ض'), - (0xFEC1, 'M', 'ط'), - (0xFEC5, 'M', 'ظ'), - (0xFEC9, 'M', 'ع'), - (0xFECD, 'M', 'غ'), - (0xFED1, 'M', 'ف'), - (0xFED5, 'M', 'ق'), - (0xFED9, 'M', 'ك'), - (0xFEDD, 'M', 'ل'), - (0xFEE1, 'M', 'م'), - (0xFEE5, 'M', 'ن'), - (0xFEE9, 'M', 'ه'), - (0xFEED, 'M', 'و'), - (0xFEEF, 'M', 'ى'), - (0xFEF1, 'M', 'ي'), - (0xFEF5, 'M', 'لآ'), - (0xFEF7, 'M', 'لأ'), - (0xFEF9, 'M', 'لإ'), - (0xFEFB, 'M', 'لا'), - (0xFEFD, 'X'), - (0xFEFF, 'I'), - (0xFF00, 'X'), - (0xFF01, '3', '!'), - (0xFF02, '3', '"'), - (0xFF03, '3', '#'), - (0xFF04, '3', '$'), - (0xFF05, '3', '%'), - (0xFF06, '3', '&'), - (0xFF07, '3', '\''), - (0xFF08, '3', '('), - (0xFF09, '3', ')'), - (0xFF0A, '3', '*'), - (0xFF0B, '3', '+'), - (0xFF0C, '3', ','), - (0xFF0D, 'M', '-'), - (0xFF0E, 'M', '.'), - (0xFF0F, '3', '/'), - (0xFF10, 'M', '0'), - (0xFF11, 'M', '1'), - (0xFF12, 'M', '2'), - (0xFF13, 'M', '3'), - (0xFF14, 'M', '4'), - (0xFF15, 'M', '5'), - (0xFF16, 'M', '6'), - (0xFF17, 'M', '7'), - (0xFF18, 'M', '8'), - (0xFF19, 'M', '9'), - (0xFF1A, '3', ':'), - ] - -def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF1B, '3', ';'), - (0xFF1C, '3', '<'), - (0xFF1D, '3', '='), - (0xFF1E, '3', '>'), - (0xFF1F, '3', '?'), - (0xFF20, '3', '@'), - (0xFF21, 'M', 'a'), - (0xFF22, 'M', 'b'), - (0xFF23, 'M', 'c'), - (0xFF24, 'M', 'd'), - (0xFF25, 'M', 'e'), - (0xFF26, 'M', 'f'), - (0xFF27, 'M', 'g'), - (0xFF28, 'M', 'h'), - (0xFF29, 'M', 'i'), - (0xFF2A, 'M', 'j'), - (0xFF2B, 'M', 'k'), - (0xFF2C, 'M', 'l'), - (0xFF2D, 'M', 'm'), - (0xFF2E, 'M', 'n'), - (0xFF2F, 'M', 'o'), - (0xFF30, 'M', 'p'), - (0xFF31, 'M', 'q'), - (0xFF32, 
'M', 'r'), - (0xFF33, 'M', 's'), - (0xFF34, 'M', 't'), - (0xFF35, 'M', 'u'), - (0xFF36, 'M', 'v'), - (0xFF37, 'M', 'w'), - (0xFF38, 'M', 'x'), - (0xFF39, 'M', 'y'), - (0xFF3A, 'M', 'z'), - (0xFF3B, '3', '['), - (0xFF3C, '3', '\\'), - (0xFF3D, '3', ']'), - (0xFF3E, '3', '^'), - (0xFF3F, '3', '_'), - (0xFF40, '3', '`'), - (0xFF41, 'M', 'a'), - (0xFF42, 'M', 'b'), - (0xFF43, 'M', 'c'), - (0xFF44, 'M', 'd'), - (0xFF45, 'M', 'e'), - (0xFF46, 'M', 'f'), - (0xFF47, 'M', 'g'), - (0xFF48, 'M', 'h'), - (0xFF49, 'M', 'i'), - (0xFF4A, 'M', 'j'), - (0xFF4B, 'M', 'k'), - (0xFF4C, 'M', 'l'), - (0xFF4D, 'M', 'm'), - (0xFF4E, 'M', 'n'), - (0xFF4F, 'M', 'o'), - (0xFF50, 'M', 'p'), - (0xFF51, 'M', 'q'), - (0xFF52, 'M', 'r'), - (0xFF53, 'M', 's'), - (0xFF54, 'M', 't'), - (0xFF55, 'M', 'u'), - (0xFF56, 'M', 'v'), - (0xFF57, 'M', 'w'), - (0xFF58, 'M', 'x'), - (0xFF59, 'M', 'y'), - (0xFF5A, 'M', 'z'), - (0xFF5B, '3', '{'), - (0xFF5C, '3', '|'), - (0xFF5D, '3', '}'), - (0xFF5E, '3', '~'), - (0xFF5F, 'M', '⦅'), - (0xFF60, 'M', '⦆'), - (0xFF61, 'M', '.'), - (0xFF62, 'M', '「'), - (0xFF63, 'M', '」'), - (0xFF64, 'M', '、'), - (0xFF65, 'M', '・'), - (0xFF66, 'M', 'ヲ'), - (0xFF67, 'M', 'ァ'), - (0xFF68, 'M', 'ィ'), - (0xFF69, 'M', 'ゥ'), - (0xFF6A, 'M', 'ェ'), - (0xFF6B, 'M', 'ォ'), - (0xFF6C, 'M', 'ャ'), - (0xFF6D, 'M', 'ュ'), - (0xFF6E, 'M', 'ョ'), - (0xFF6F, 'M', 'ッ'), - (0xFF70, 'M', 'ー'), - (0xFF71, 'M', 'ア'), - (0xFF72, 'M', 'イ'), - (0xFF73, 'M', 'ウ'), - (0xFF74, 'M', 'エ'), - (0xFF75, 'M', 'オ'), - (0xFF76, 'M', 'カ'), - (0xFF77, 'M', 'キ'), - (0xFF78, 'M', 'ク'), - (0xFF79, 'M', 'ケ'), - (0xFF7A, 'M', 'コ'), - (0xFF7B, 'M', 'サ'), - (0xFF7C, 'M', 'シ'), - (0xFF7D, 'M', 'ス'), - (0xFF7E, 'M', 'セ'), - ] - -def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFF7F, 'M', 'ソ'), - (0xFF80, 'M', 'タ'), - (0xFF81, 'M', 'チ'), - (0xFF82, 'M', 'ツ'), - (0xFF83, 'M', 'テ'), - (0xFF84, 'M', 'ト'), - (0xFF85, 'M', 'ナ'), - (0xFF86, 'M', 'ニ'), - (0xFF87, 'M', 'ヌ'), - (0xFF88, 'M', 'ネ'), - (0xFF89, 'M', 'ノ'), - (0xFF8A, 'M', 'ハ'), - (0xFF8B, 'M', 'ヒ'), - (0xFF8C, 'M', 'フ'), - (0xFF8D, 'M', 'ヘ'), - (0xFF8E, 'M', 'ホ'), - (0xFF8F, 'M', 'マ'), - (0xFF90, 'M', 'ミ'), - (0xFF91, 'M', 'ム'), - (0xFF92, 'M', 'メ'), - (0xFF93, 'M', 'モ'), - (0xFF94, 'M', 'ヤ'), - (0xFF95, 'M', 'ユ'), - (0xFF96, 'M', 'ヨ'), - (0xFF97, 'M', 'ラ'), - (0xFF98, 'M', 'リ'), - (0xFF99, 'M', 'ル'), - (0xFF9A, 'M', 'レ'), - (0xFF9B, 'M', 'ロ'), - (0xFF9C, 'M', 'ワ'), - (0xFF9D, 'M', 'ン'), - (0xFF9E, 'M', '゙'), - (0xFF9F, 'M', '゚'), - (0xFFA0, 'X'), - (0xFFA1, 'M', 'ᄀ'), - (0xFFA2, 'M', 'ᄁ'), - (0xFFA3, 'M', 'ᆪ'), - (0xFFA4, 'M', 'ᄂ'), - (0xFFA5, 'M', 'ᆬ'), - (0xFFA6, 'M', 'ᆭ'), - (0xFFA7, 'M', 'ᄃ'), - (0xFFA8, 'M', 'ᄄ'), - (0xFFA9, 'M', 'ᄅ'), - (0xFFAA, 'M', 'ᆰ'), - (0xFFAB, 'M', 'ᆱ'), - (0xFFAC, 'M', 'ᆲ'), - (0xFFAD, 'M', 'ᆳ'), - (0xFFAE, 'M', 'ᆴ'), - (0xFFAF, 'M', 'ᆵ'), - (0xFFB0, 'M', 'ᄚ'), - (0xFFB1, 'M', 'ᄆ'), - (0xFFB2, 'M', 'ᄇ'), - (0xFFB3, 'M', 'ᄈ'), - (0xFFB4, 'M', 'ᄡ'), - (0xFFB5, 'M', 'ᄉ'), - (0xFFB6, 'M', 'ᄊ'), - (0xFFB7, 'M', 'ᄋ'), - (0xFFB8, 'M', 'ᄌ'), - (0xFFB9, 'M', 'ᄍ'), - (0xFFBA, 'M', 'ᄎ'), - (0xFFBB, 'M', 'ᄏ'), - (0xFFBC, 'M', 'ᄐ'), - (0xFFBD, 'M', 'ᄑ'), - (0xFFBE, 'M', 'ᄒ'), - (0xFFBF, 'X'), - (0xFFC2, 'M', 'ᅡ'), - (0xFFC3, 'M', 'ᅢ'), - (0xFFC4, 'M', 'ᅣ'), - (0xFFC5, 'M', 'ᅤ'), - (0xFFC6, 'M', 'ᅥ'), - (0xFFC7, 'M', 'ᅦ'), - (0xFFC8, 'X'), - (0xFFCA, 'M', 'ᅧ'), - (0xFFCB, 'M', 'ᅨ'), - (0xFFCC, 'M', 'ᅩ'), - (0xFFCD, 'M', 'ᅪ'), - (0xFFCE, 'M', 'ᅫ'), - (0xFFCF, 'M', 'ᅬ'), - (0xFFD0, 'X'), - (0xFFD2, 'M', 'ᅭ'), - (0xFFD3, 'M', 'ᅮ'), - (0xFFD4, 'M', 'ᅯ'), 
- (0xFFD5, 'M', 'ᅰ'), - (0xFFD6, 'M', 'ᅱ'), - (0xFFD7, 'M', 'ᅲ'), - (0xFFD8, 'X'), - (0xFFDA, 'M', 'ᅳ'), - (0xFFDB, 'M', 'ᅴ'), - (0xFFDC, 'M', 'ᅵ'), - (0xFFDD, 'X'), - (0xFFE0, 'M', '¢'), - (0xFFE1, 'M', '£'), - (0xFFE2, 'M', '¬'), - (0xFFE3, '3', ' ̄'), - (0xFFE4, 'M', '¦'), - (0xFFE5, 'M', '¥'), - (0xFFE6, 'M', '₩'), - (0xFFE7, 'X'), - (0xFFE8, 'M', '│'), - (0xFFE9, 'M', '←'), - ] - -def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0xFFEA, 'M', '↑'), - (0xFFEB, 'M', '→'), - (0xFFEC, 'M', '↓'), - (0xFFED, 'M', '■'), - (0xFFEE, 'M', '○'), - (0xFFEF, 'X'), - (0x10000, 'V'), - (0x1000C, 'X'), - (0x1000D, 'V'), - (0x10027, 'X'), - (0x10028, 'V'), - (0x1003B, 'X'), - (0x1003C, 'V'), - (0x1003E, 'X'), - (0x1003F, 'V'), - (0x1004E, 'X'), - (0x10050, 'V'), - (0x1005E, 'X'), - (0x10080, 'V'), - (0x100FB, 'X'), - (0x10100, 'V'), - (0x10103, 'X'), - (0x10107, 'V'), - (0x10134, 'X'), - (0x10137, 'V'), - (0x1018F, 'X'), - (0x10190, 'V'), - (0x1019D, 'X'), - (0x101A0, 'V'), - (0x101A1, 'X'), - (0x101D0, 'V'), - (0x101FE, 'X'), - (0x10280, 'V'), - (0x1029D, 'X'), - (0x102A0, 'V'), - (0x102D1, 'X'), - (0x102E0, 'V'), - (0x102FC, 'X'), - (0x10300, 'V'), - (0x10324, 'X'), - (0x1032D, 'V'), - (0x1034B, 'X'), - (0x10350, 'V'), - (0x1037B, 'X'), - (0x10380, 'V'), - (0x1039E, 'X'), - (0x1039F, 'V'), - (0x103C4, 'X'), - (0x103C8, 'V'), - (0x103D6, 'X'), - (0x10400, 'M', '𐐨'), - (0x10401, 'M', '𐐩'), - (0x10402, 'M', '𐐪'), - (0x10403, 'M', '𐐫'), - (0x10404, 'M', '𐐬'), - (0x10405, 'M', '𐐭'), - (0x10406, 'M', '𐐮'), - (0x10407, 'M', '𐐯'), - (0x10408, 'M', '𐐰'), - (0x10409, 'M', '𐐱'), - (0x1040A, 'M', '𐐲'), - (0x1040B, 'M', '𐐳'), - (0x1040C, 'M', '𐐴'), - (0x1040D, 'M', '𐐵'), - (0x1040E, 'M', '𐐶'), - (0x1040F, 'M', '𐐷'), - (0x10410, 'M', '𐐸'), - (0x10411, 'M', '𐐹'), - (0x10412, 'M', '𐐺'), - (0x10413, 'M', '𐐻'), - (0x10414, 'M', '𐐼'), - (0x10415, 'M', '𐐽'), - (0x10416, 'M', '𐐾'), - (0x10417, 'M', '𐐿'), - (0x10418, 'M', '𐑀'), - (0x10419, 'M', '𐑁'), - (0x1041A, 'M', '𐑂'), - (0x1041B, 'M', '𐑃'), - (0x1041C, 'M', '𐑄'), - (0x1041D, 'M', '𐑅'), - (0x1041E, 'M', '𐑆'), - (0x1041F, 'M', '𐑇'), - (0x10420, 'M', '𐑈'), - (0x10421, 'M', '𐑉'), - (0x10422, 'M', '𐑊'), - (0x10423, 'M', '𐑋'), - (0x10424, 'M', '𐑌'), - (0x10425, 'M', '𐑍'), - (0x10426, 'M', '𐑎'), - (0x10427, 'M', '𐑏'), - (0x10428, 'V'), - (0x1049E, 'X'), - (0x104A0, 'V'), - (0x104AA, 'X'), - (0x104B0, 'M', '𐓘'), - (0x104B1, 'M', '𐓙'), - (0x104B2, 'M', '𐓚'), - (0x104B3, 'M', '𐓛'), - (0x104B4, 'M', '𐓜'), - (0x104B5, 'M', '𐓝'), - ] - -def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x104B6, 'M', '𐓞'), - (0x104B7, 'M', '𐓟'), - (0x104B8, 'M', '𐓠'), - (0x104B9, 'M', '𐓡'), - (0x104BA, 'M', '𐓢'), - (0x104BB, 'M', '𐓣'), - (0x104BC, 'M', '𐓤'), - (0x104BD, 'M', '𐓥'), - (0x104BE, 'M', '𐓦'), - (0x104BF, 'M', '𐓧'), - (0x104C0, 'M', '𐓨'), - (0x104C1, 'M', '𐓩'), - (0x104C2, 'M', '𐓪'), - (0x104C3, 'M', '𐓫'), - (0x104C4, 'M', '𐓬'), - (0x104C5, 'M', '𐓭'), - (0x104C6, 'M', '𐓮'), - (0x104C7, 'M', '𐓯'), - (0x104C8, 'M', '𐓰'), - (0x104C9, 'M', '𐓱'), - (0x104CA, 'M', '𐓲'), - (0x104CB, 'M', '𐓳'), - (0x104CC, 'M', '𐓴'), - (0x104CD, 'M', '𐓵'), - (0x104CE, 'M', '𐓶'), - (0x104CF, 'M', '𐓷'), - (0x104D0, 'M', '𐓸'), - (0x104D1, 'M', '𐓹'), - (0x104D2, 'M', '𐓺'), - (0x104D3, 'M', '𐓻'), - (0x104D4, 'X'), - (0x104D8, 'V'), - (0x104FC, 'X'), - (0x10500, 'V'), - (0x10528, 'X'), - (0x10530, 'V'), - (0x10564, 'X'), - (0x1056F, 'V'), - (0x10570, 'M', '𐖗'), - (0x10571, 'M', '𐖘'), - (0x10572, 'M', '𐖙'), - (0x10573, 'M', '𐖚'), - (0x10574, 'M', '𐖛'), 
- (0x10575, 'M', '𐖜'), - (0x10576, 'M', '𐖝'), - (0x10577, 'M', '𐖞'), - (0x10578, 'M', '𐖟'), - (0x10579, 'M', '𐖠'), - (0x1057A, 'M', '𐖡'), - (0x1057B, 'X'), - (0x1057C, 'M', '𐖣'), - (0x1057D, 'M', '𐖤'), - (0x1057E, 'M', '𐖥'), - (0x1057F, 'M', '𐖦'), - (0x10580, 'M', '𐖧'), - (0x10581, 'M', '𐖨'), - (0x10582, 'M', '𐖩'), - (0x10583, 'M', '𐖪'), - (0x10584, 'M', '𐖫'), - (0x10585, 'M', '𐖬'), - (0x10586, 'M', '𐖭'), - (0x10587, 'M', '𐖮'), - (0x10588, 'M', '𐖯'), - (0x10589, 'M', '𐖰'), - (0x1058A, 'M', '𐖱'), - (0x1058B, 'X'), - (0x1058C, 'M', '𐖳'), - (0x1058D, 'M', '𐖴'), - (0x1058E, 'M', '𐖵'), - (0x1058F, 'M', '𐖶'), - (0x10590, 'M', '𐖷'), - (0x10591, 'M', '𐖸'), - (0x10592, 'M', '𐖹'), - (0x10593, 'X'), - (0x10594, 'M', '𐖻'), - (0x10595, 'M', '𐖼'), - (0x10596, 'X'), - (0x10597, 'V'), - (0x105A2, 'X'), - (0x105A3, 'V'), - (0x105B2, 'X'), - (0x105B3, 'V'), - (0x105BA, 'X'), - (0x105BB, 'V'), - (0x105BD, 'X'), - (0x10600, 'V'), - (0x10737, 'X'), - (0x10740, 'V'), - (0x10756, 'X'), - (0x10760, 'V'), - (0x10768, 'X'), - (0x10780, 'V'), - (0x10781, 'M', 'ː'), - (0x10782, 'M', 'ˑ'), - (0x10783, 'M', 'æ'), - (0x10784, 'M', 'ʙ'), - (0x10785, 'M', 'ɓ'), - (0x10786, 'X'), - (0x10787, 'M', 'ʣ'), - (0x10788, 'M', 'ꭦ'), - ] - -def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10789, 'M', 'ʥ'), - (0x1078A, 'M', 'ʤ'), - (0x1078B, 'M', 'ɖ'), - (0x1078C, 'M', 'ɗ'), - (0x1078D, 'M', 'ᶑ'), - (0x1078E, 'M', 'ɘ'), - (0x1078F, 'M', 'ɞ'), - (0x10790, 'M', 'ʩ'), - (0x10791, 'M', 'ɤ'), - (0x10792, 'M', 'ɢ'), - (0x10793, 'M', 'ɠ'), - (0x10794, 'M', 'ʛ'), - (0x10795, 'M', 'ħ'), - (0x10796, 'M', 'ʜ'), - (0x10797, 'M', 'ɧ'), - (0x10798, 'M', 'ʄ'), - (0x10799, 'M', 'ʪ'), - (0x1079A, 'M', 'ʫ'), - (0x1079B, 'M', 'ɬ'), - (0x1079C, 'M', '𝼄'), - (0x1079D, 'M', 'ꞎ'), - (0x1079E, 'M', 'ɮ'), - (0x1079F, 'M', '𝼅'), - (0x107A0, 'M', 'ʎ'), - (0x107A1, 'M', '𝼆'), - (0x107A2, 'M', 'ø'), - (0x107A3, 'M', 'ɶ'), - (0x107A4, 'M', 'ɷ'), - (0x107A5, 'M', 'q'), - (0x107A6, 'M', 'ɺ'), - (0x107A7, 'M', '𝼈'), - (0x107A8, 'M', 'ɽ'), - (0x107A9, 'M', 'ɾ'), - (0x107AA, 'M', 'ʀ'), - (0x107AB, 'M', 'ʨ'), - (0x107AC, 'M', 'ʦ'), - (0x107AD, 'M', 'ꭧ'), - (0x107AE, 'M', 'ʧ'), - (0x107AF, 'M', 'ʈ'), - (0x107B0, 'M', 'ⱱ'), - (0x107B1, 'X'), - (0x107B2, 'M', 'ʏ'), - (0x107B3, 'M', 'ʡ'), - (0x107B4, 'M', 'ʢ'), - (0x107B5, 'M', 'ʘ'), - (0x107B6, 'M', 'ǀ'), - (0x107B7, 'M', 'ǁ'), - (0x107B8, 'M', 'ǂ'), - (0x107B9, 'M', '𝼊'), - (0x107BA, 'M', '𝼞'), - (0x107BB, 'X'), - (0x10800, 'V'), - (0x10806, 'X'), - (0x10808, 'V'), - (0x10809, 'X'), - (0x1080A, 'V'), - (0x10836, 'X'), - (0x10837, 'V'), - (0x10839, 'X'), - (0x1083C, 'V'), - (0x1083D, 'X'), - (0x1083F, 'V'), - (0x10856, 'X'), - (0x10857, 'V'), - (0x1089F, 'X'), - (0x108A7, 'V'), - (0x108B0, 'X'), - (0x108E0, 'V'), - (0x108F3, 'X'), - (0x108F4, 'V'), - (0x108F6, 'X'), - (0x108FB, 'V'), - (0x1091C, 'X'), - (0x1091F, 'V'), - (0x1093A, 'X'), - (0x1093F, 'V'), - (0x10940, 'X'), - (0x10980, 'V'), - (0x109B8, 'X'), - (0x109BC, 'V'), - (0x109D0, 'X'), - (0x109D2, 'V'), - (0x10A04, 'X'), - (0x10A05, 'V'), - (0x10A07, 'X'), - (0x10A0C, 'V'), - (0x10A14, 'X'), - (0x10A15, 'V'), - (0x10A18, 'X'), - (0x10A19, 'V'), - (0x10A36, 'X'), - (0x10A38, 'V'), - (0x10A3B, 'X'), - (0x10A3F, 'V'), - (0x10A49, 'X'), - (0x10A50, 'V'), - (0x10A59, 'X'), - (0x10A60, 'V'), - (0x10AA0, 'X'), - (0x10AC0, 'V'), - ] - -def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x10AE7, 'X'), - (0x10AEB, 'V'), - (0x10AF7, 'X'), - (0x10B00, 'V'), - (0x10B36, 'X'), - (0x10B39, 'V'), - (0x10B56, 
'X'), - (0x10B58, 'V'), - (0x10B73, 'X'), - (0x10B78, 'V'), - (0x10B92, 'X'), - (0x10B99, 'V'), - (0x10B9D, 'X'), - (0x10BA9, 'V'), - (0x10BB0, 'X'), - (0x10C00, 'V'), - (0x10C49, 'X'), - (0x10C80, 'M', '𐳀'), - (0x10C81, 'M', '𐳁'), - (0x10C82, 'M', '𐳂'), - (0x10C83, 'M', '𐳃'), - (0x10C84, 'M', '𐳄'), - (0x10C85, 'M', '𐳅'), - (0x10C86, 'M', '𐳆'), - (0x10C87, 'M', '𐳇'), - (0x10C88, 'M', '𐳈'), - (0x10C89, 'M', '𐳉'), - (0x10C8A, 'M', '𐳊'), - (0x10C8B, 'M', '𐳋'), - (0x10C8C, 'M', '𐳌'), - (0x10C8D, 'M', '𐳍'), - (0x10C8E, 'M', '𐳎'), - (0x10C8F, 'M', '𐳏'), - (0x10C90, 'M', '𐳐'), - (0x10C91, 'M', '𐳑'), - (0x10C92, 'M', '𐳒'), - (0x10C93, 'M', '𐳓'), - (0x10C94, 'M', '𐳔'), - (0x10C95, 'M', '𐳕'), - (0x10C96, 'M', '𐳖'), - (0x10C97, 'M', '𐳗'), - (0x10C98, 'M', '𐳘'), - (0x10C99, 'M', '𐳙'), - (0x10C9A, 'M', '𐳚'), - (0x10C9B, 'M', '𐳛'), - (0x10C9C, 'M', '𐳜'), - (0x10C9D, 'M', '𐳝'), - (0x10C9E, 'M', '𐳞'), - (0x10C9F, 'M', '𐳟'), - (0x10CA0, 'M', '𐳠'), - (0x10CA1, 'M', '𐳡'), - (0x10CA2, 'M', '𐳢'), - (0x10CA3, 'M', '𐳣'), - (0x10CA4, 'M', '𐳤'), - (0x10CA5, 'M', '𐳥'), - (0x10CA6, 'M', '𐳦'), - (0x10CA7, 'M', '𐳧'), - (0x10CA8, 'M', '𐳨'), - (0x10CA9, 'M', '𐳩'), - (0x10CAA, 'M', '𐳪'), - (0x10CAB, 'M', '𐳫'), - (0x10CAC, 'M', '𐳬'), - (0x10CAD, 'M', '𐳭'), - (0x10CAE, 'M', '𐳮'), - (0x10CAF, 'M', '𐳯'), - (0x10CB0, 'M', '𐳰'), - (0x10CB1, 'M', '𐳱'), - (0x10CB2, 'M', '𐳲'), - (0x10CB3, 'X'), - (0x10CC0, 'V'), - (0x10CF3, 'X'), - (0x10CFA, 'V'), - (0x10D28, 'X'), - (0x10D30, 'V'), - (0x10D3A, 'X'), - (0x10E60, 'V'), - (0x10E7F, 'X'), - (0x10E80, 'V'), - (0x10EAA, 'X'), - (0x10EAB, 'V'), - (0x10EAE, 'X'), - (0x10EB0, 'V'), - (0x10EB2, 'X'), - (0x10EFD, 'V'), - (0x10F28, 'X'), - (0x10F30, 'V'), - (0x10F5A, 'X'), - (0x10F70, 'V'), - (0x10F8A, 'X'), - (0x10FB0, 'V'), - (0x10FCC, 'X'), - (0x10FE0, 'V'), - (0x10FF7, 'X'), - (0x11000, 'V'), - (0x1104E, 'X'), - (0x11052, 'V'), - (0x11076, 'X'), - (0x1107F, 'V'), - (0x110BD, 'X'), - (0x110BE, 'V'), - ] - -def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x110C3, 'X'), - (0x110D0, 'V'), - (0x110E9, 'X'), - (0x110F0, 'V'), - (0x110FA, 'X'), - (0x11100, 'V'), - (0x11135, 'X'), - (0x11136, 'V'), - (0x11148, 'X'), - (0x11150, 'V'), - (0x11177, 'X'), - (0x11180, 'V'), - (0x111E0, 'X'), - (0x111E1, 'V'), - (0x111F5, 'X'), - (0x11200, 'V'), - (0x11212, 'X'), - (0x11213, 'V'), - (0x11242, 'X'), - (0x11280, 'V'), - (0x11287, 'X'), - (0x11288, 'V'), - (0x11289, 'X'), - (0x1128A, 'V'), - (0x1128E, 'X'), - (0x1128F, 'V'), - (0x1129E, 'X'), - (0x1129F, 'V'), - (0x112AA, 'X'), - (0x112B0, 'V'), - (0x112EB, 'X'), - (0x112F0, 'V'), - (0x112FA, 'X'), - (0x11300, 'V'), - (0x11304, 'X'), - (0x11305, 'V'), - (0x1130D, 'X'), - (0x1130F, 'V'), - (0x11311, 'X'), - (0x11313, 'V'), - (0x11329, 'X'), - (0x1132A, 'V'), - (0x11331, 'X'), - (0x11332, 'V'), - (0x11334, 'X'), - (0x11335, 'V'), - (0x1133A, 'X'), - (0x1133B, 'V'), - (0x11345, 'X'), - (0x11347, 'V'), - (0x11349, 'X'), - (0x1134B, 'V'), - (0x1134E, 'X'), - (0x11350, 'V'), - (0x11351, 'X'), - (0x11357, 'V'), - (0x11358, 'X'), - (0x1135D, 'V'), - (0x11364, 'X'), - (0x11366, 'V'), - (0x1136D, 'X'), - (0x11370, 'V'), - (0x11375, 'X'), - (0x11400, 'V'), - (0x1145C, 'X'), - (0x1145D, 'V'), - (0x11462, 'X'), - (0x11480, 'V'), - (0x114C8, 'X'), - (0x114D0, 'V'), - (0x114DA, 'X'), - (0x11580, 'V'), - (0x115B6, 'X'), - (0x115B8, 'V'), - (0x115DE, 'X'), - (0x11600, 'V'), - (0x11645, 'X'), - (0x11650, 'V'), - (0x1165A, 'X'), - (0x11660, 'V'), - (0x1166D, 'X'), - (0x11680, 'V'), - (0x116BA, 'X'), - (0x116C0, 'V'), - (0x116CA, 'X'), - 
(0x11700, 'V'), - (0x1171B, 'X'), - (0x1171D, 'V'), - (0x1172C, 'X'), - (0x11730, 'V'), - (0x11747, 'X'), - (0x11800, 'V'), - (0x1183C, 'X'), - (0x118A0, 'M', '𑣀'), - (0x118A1, 'M', '𑣁'), - (0x118A2, 'M', '𑣂'), - (0x118A3, 'M', '𑣃'), - (0x118A4, 'M', '𑣄'), - (0x118A5, 'M', '𑣅'), - (0x118A6, 'M', '𑣆'), - ] - -def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x118A7, 'M', '𑣇'), - (0x118A8, 'M', '𑣈'), - (0x118A9, 'M', '𑣉'), - (0x118AA, 'M', '𑣊'), - (0x118AB, 'M', '𑣋'), - (0x118AC, 'M', '𑣌'), - (0x118AD, 'M', '𑣍'), - (0x118AE, 'M', '𑣎'), - (0x118AF, 'M', '𑣏'), - (0x118B0, 'M', '𑣐'), - (0x118B1, 'M', '𑣑'), - (0x118B2, 'M', '𑣒'), - (0x118B3, 'M', '𑣓'), - (0x118B4, 'M', '𑣔'), - (0x118B5, 'M', '𑣕'), - (0x118B6, 'M', '𑣖'), - (0x118B7, 'M', '𑣗'), - (0x118B8, 'M', '𑣘'), - (0x118B9, 'M', '𑣙'), - (0x118BA, 'M', '𑣚'), - (0x118BB, 'M', '𑣛'), - (0x118BC, 'M', '𑣜'), - (0x118BD, 'M', '𑣝'), - (0x118BE, 'M', '𑣞'), - (0x118BF, 'M', '𑣟'), - (0x118C0, 'V'), - (0x118F3, 'X'), - (0x118FF, 'V'), - (0x11907, 'X'), - (0x11909, 'V'), - (0x1190A, 'X'), - (0x1190C, 'V'), - (0x11914, 'X'), - (0x11915, 'V'), - (0x11917, 'X'), - (0x11918, 'V'), - (0x11936, 'X'), - (0x11937, 'V'), - (0x11939, 'X'), - (0x1193B, 'V'), - (0x11947, 'X'), - (0x11950, 'V'), - (0x1195A, 'X'), - (0x119A0, 'V'), - (0x119A8, 'X'), - (0x119AA, 'V'), - (0x119D8, 'X'), - (0x119DA, 'V'), - (0x119E5, 'X'), - (0x11A00, 'V'), - (0x11A48, 'X'), - (0x11A50, 'V'), - (0x11AA3, 'X'), - (0x11AB0, 'V'), - (0x11AF9, 'X'), - (0x11B00, 'V'), - (0x11B0A, 'X'), - (0x11C00, 'V'), - (0x11C09, 'X'), - (0x11C0A, 'V'), - (0x11C37, 'X'), - (0x11C38, 'V'), - (0x11C46, 'X'), - (0x11C50, 'V'), - (0x11C6D, 'X'), - (0x11C70, 'V'), - (0x11C90, 'X'), - (0x11C92, 'V'), - (0x11CA8, 'X'), - (0x11CA9, 'V'), - (0x11CB7, 'X'), - (0x11D00, 'V'), - (0x11D07, 'X'), - (0x11D08, 'V'), - (0x11D0A, 'X'), - (0x11D0B, 'V'), - (0x11D37, 'X'), - (0x11D3A, 'V'), - (0x11D3B, 'X'), - (0x11D3C, 'V'), - (0x11D3E, 'X'), - (0x11D3F, 'V'), - (0x11D48, 'X'), - (0x11D50, 'V'), - (0x11D5A, 'X'), - (0x11D60, 'V'), - (0x11D66, 'X'), - (0x11D67, 'V'), - (0x11D69, 'X'), - (0x11D6A, 'V'), - (0x11D8F, 'X'), - (0x11D90, 'V'), - (0x11D92, 'X'), - (0x11D93, 'V'), - (0x11D99, 'X'), - (0x11DA0, 'V'), - (0x11DAA, 'X'), - (0x11EE0, 'V'), - (0x11EF9, 'X'), - (0x11F00, 'V'), - ] - -def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x11F11, 'X'), - (0x11F12, 'V'), - (0x11F3B, 'X'), - (0x11F3E, 'V'), - (0x11F5A, 'X'), - (0x11FB0, 'V'), - (0x11FB1, 'X'), - (0x11FC0, 'V'), - (0x11FF2, 'X'), - (0x11FFF, 'V'), - (0x1239A, 'X'), - (0x12400, 'V'), - (0x1246F, 'X'), - (0x12470, 'V'), - (0x12475, 'X'), - (0x12480, 'V'), - (0x12544, 'X'), - (0x12F90, 'V'), - (0x12FF3, 'X'), - (0x13000, 'V'), - (0x13430, 'X'), - (0x13440, 'V'), - (0x13456, 'X'), - (0x14400, 'V'), - (0x14647, 'X'), - (0x16800, 'V'), - (0x16A39, 'X'), - (0x16A40, 'V'), - (0x16A5F, 'X'), - (0x16A60, 'V'), - (0x16A6A, 'X'), - (0x16A6E, 'V'), - (0x16ABF, 'X'), - (0x16AC0, 'V'), - (0x16ACA, 'X'), - (0x16AD0, 'V'), - (0x16AEE, 'X'), - (0x16AF0, 'V'), - (0x16AF6, 'X'), - (0x16B00, 'V'), - (0x16B46, 'X'), - (0x16B50, 'V'), - (0x16B5A, 'X'), - (0x16B5B, 'V'), - (0x16B62, 'X'), - (0x16B63, 'V'), - (0x16B78, 'X'), - (0x16B7D, 'V'), - (0x16B90, 'X'), - (0x16E40, 'M', '𖹠'), - (0x16E41, 'M', '𖹡'), - (0x16E42, 'M', '𖹢'), - (0x16E43, 'M', '𖹣'), - (0x16E44, 'M', '𖹤'), - (0x16E45, 'M', '𖹥'), - (0x16E46, 'M', '𖹦'), - (0x16E47, 'M', '𖹧'), - (0x16E48, 'M', '𖹨'), - (0x16E49, 'M', '𖹩'), - (0x16E4A, 'M', '𖹪'), - (0x16E4B, 'M', 
'𖹫'), - (0x16E4C, 'M', '𖹬'), - (0x16E4D, 'M', '𖹭'), - (0x16E4E, 'M', '𖹮'), - (0x16E4F, 'M', '𖹯'), - (0x16E50, 'M', '𖹰'), - (0x16E51, 'M', '𖹱'), - (0x16E52, 'M', '𖹲'), - (0x16E53, 'M', '𖹳'), - (0x16E54, 'M', '𖹴'), - (0x16E55, 'M', '𖹵'), - (0x16E56, 'M', '𖹶'), - (0x16E57, 'M', '𖹷'), - (0x16E58, 'M', '𖹸'), - (0x16E59, 'M', '𖹹'), - (0x16E5A, 'M', '𖹺'), - (0x16E5B, 'M', '𖹻'), - (0x16E5C, 'M', '𖹼'), - (0x16E5D, 'M', '𖹽'), - (0x16E5E, 'M', '𖹾'), - (0x16E5F, 'M', '𖹿'), - (0x16E60, 'V'), - (0x16E9B, 'X'), - (0x16F00, 'V'), - (0x16F4B, 'X'), - (0x16F4F, 'V'), - (0x16F88, 'X'), - (0x16F8F, 'V'), - (0x16FA0, 'X'), - (0x16FE0, 'V'), - (0x16FE5, 'X'), - (0x16FF0, 'V'), - (0x16FF2, 'X'), - (0x17000, 'V'), - (0x187F8, 'X'), - (0x18800, 'V'), - (0x18CD6, 'X'), - (0x18D00, 'V'), - (0x18D09, 'X'), - (0x1AFF0, 'V'), - ] - -def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1AFF4, 'X'), - (0x1AFF5, 'V'), - (0x1AFFC, 'X'), - (0x1AFFD, 'V'), - (0x1AFFF, 'X'), - (0x1B000, 'V'), - (0x1B123, 'X'), - (0x1B132, 'V'), - (0x1B133, 'X'), - (0x1B150, 'V'), - (0x1B153, 'X'), - (0x1B155, 'V'), - (0x1B156, 'X'), - (0x1B164, 'V'), - (0x1B168, 'X'), - (0x1B170, 'V'), - (0x1B2FC, 'X'), - (0x1BC00, 'V'), - (0x1BC6B, 'X'), - (0x1BC70, 'V'), - (0x1BC7D, 'X'), - (0x1BC80, 'V'), - (0x1BC89, 'X'), - (0x1BC90, 'V'), - (0x1BC9A, 'X'), - (0x1BC9C, 'V'), - (0x1BCA0, 'I'), - (0x1BCA4, 'X'), - (0x1CF00, 'V'), - (0x1CF2E, 'X'), - (0x1CF30, 'V'), - (0x1CF47, 'X'), - (0x1CF50, 'V'), - (0x1CFC4, 'X'), - (0x1D000, 'V'), - (0x1D0F6, 'X'), - (0x1D100, 'V'), - (0x1D127, 'X'), - (0x1D129, 'V'), - (0x1D15E, 'M', '𝅗𝅥'), - (0x1D15F, 'M', '𝅘𝅥'), - (0x1D160, 'M', '𝅘𝅥𝅮'), - (0x1D161, 'M', '𝅘𝅥𝅯'), - (0x1D162, 'M', '𝅘𝅥𝅰'), - (0x1D163, 'M', '𝅘𝅥𝅱'), - (0x1D164, 'M', '𝅘𝅥𝅲'), - (0x1D165, 'V'), - (0x1D173, 'X'), - (0x1D17B, 'V'), - (0x1D1BB, 'M', '𝆹𝅥'), - (0x1D1BC, 'M', '𝆺𝅥'), - (0x1D1BD, 'M', '𝆹𝅥𝅮'), - (0x1D1BE, 'M', '𝆺𝅥𝅮'), - (0x1D1BF, 'M', '𝆹𝅥𝅯'), - (0x1D1C0, 'M', '𝆺𝅥𝅯'), - (0x1D1C1, 'V'), - (0x1D1EB, 'X'), - (0x1D200, 'V'), - (0x1D246, 'X'), - (0x1D2C0, 'V'), - (0x1D2D4, 'X'), - (0x1D2E0, 'V'), - (0x1D2F4, 'X'), - (0x1D300, 'V'), - (0x1D357, 'X'), - (0x1D360, 'V'), - (0x1D379, 'X'), - (0x1D400, 'M', 'a'), - (0x1D401, 'M', 'b'), - (0x1D402, 'M', 'c'), - (0x1D403, 'M', 'd'), - (0x1D404, 'M', 'e'), - (0x1D405, 'M', 'f'), - (0x1D406, 'M', 'g'), - (0x1D407, 'M', 'h'), - (0x1D408, 'M', 'i'), - (0x1D409, 'M', 'j'), - (0x1D40A, 'M', 'k'), - (0x1D40B, 'M', 'l'), - (0x1D40C, 'M', 'm'), - (0x1D40D, 'M', 'n'), - (0x1D40E, 'M', 'o'), - (0x1D40F, 'M', 'p'), - (0x1D410, 'M', 'q'), - (0x1D411, 'M', 'r'), - (0x1D412, 'M', 's'), - (0x1D413, 'M', 't'), - (0x1D414, 'M', 'u'), - (0x1D415, 'M', 'v'), - (0x1D416, 'M', 'w'), - (0x1D417, 'M', 'x'), - (0x1D418, 'M', 'y'), - (0x1D419, 'M', 'z'), - (0x1D41A, 'M', 'a'), - (0x1D41B, 'M', 'b'), - (0x1D41C, 'M', 'c'), - (0x1D41D, 'M', 'd'), - (0x1D41E, 'M', 'e'), - (0x1D41F, 'M', 'f'), - (0x1D420, 'M', 'g'), - ] - -def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D421, 'M', 'h'), - (0x1D422, 'M', 'i'), - (0x1D423, 'M', 'j'), - (0x1D424, 'M', 'k'), - (0x1D425, 'M', 'l'), - (0x1D426, 'M', 'm'), - (0x1D427, 'M', 'n'), - (0x1D428, 'M', 'o'), - (0x1D429, 'M', 'p'), - (0x1D42A, 'M', 'q'), - (0x1D42B, 'M', 'r'), - (0x1D42C, 'M', 's'), - (0x1D42D, 'M', 't'), - (0x1D42E, 'M', 'u'), - (0x1D42F, 'M', 'v'), - (0x1D430, 'M', 'w'), - (0x1D431, 'M', 'x'), - (0x1D432, 'M', 'y'), - (0x1D433, 'M', 'z'), - (0x1D434, 'M', 'a'), - (0x1D435, 'M', 'b'), - (0x1D436, 'M', 'c'), - (0x1D437, 
'M', 'd'), - (0x1D438, 'M', 'e'), - (0x1D439, 'M', 'f'), - (0x1D43A, 'M', 'g'), - (0x1D43B, 'M', 'h'), - (0x1D43C, 'M', 'i'), - (0x1D43D, 'M', 'j'), - (0x1D43E, 'M', 'k'), - (0x1D43F, 'M', 'l'), - (0x1D440, 'M', 'm'), - (0x1D441, 'M', 'n'), - (0x1D442, 'M', 'o'), - (0x1D443, 'M', 'p'), - (0x1D444, 'M', 'q'), - (0x1D445, 'M', 'r'), - (0x1D446, 'M', 's'), - (0x1D447, 'M', 't'), - (0x1D448, 'M', 'u'), - (0x1D449, 'M', 'v'), - (0x1D44A, 'M', 'w'), - (0x1D44B, 'M', 'x'), - (0x1D44C, 'M', 'y'), - (0x1D44D, 'M', 'z'), - (0x1D44E, 'M', 'a'), - (0x1D44F, 'M', 'b'), - (0x1D450, 'M', 'c'), - (0x1D451, 'M', 'd'), - (0x1D452, 'M', 'e'), - (0x1D453, 'M', 'f'), - (0x1D454, 'M', 'g'), - (0x1D455, 'X'), - (0x1D456, 'M', 'i'), - (0x1D457, 'M', 'j'), - (0x1D458, 'M', 'k'), - (0x1D459, 'M', 'l'), - (0x1D45A, 'M', 'm'), - (0x1D45B, 'M', 'n'), - (0x1D45C, 'M', 'o'), - (0x1D45D, 'M', 'p'), - (0x1D45E, 'M', 'q'), - (0x1D45F, 'M', 'r'), - (0x1D460, 'M', 's'), - (0x1D461, 'M', 't'), - (0x1D462, 'M', 'u'), - (0x1D463, 'M', 'v'), - (0x1D464, 'M', 'w'), - (0x1D465, 'M', 'x'), - (0x1D466, 'M', 'y'), - (0x1D467, 'M', 'z'), - (0x1D468, 'M', 'a'), - (0x1D469, 'M', 'b'), - (0x1D46A, 'M', 'c'), - (0x1D46B, 'M', 'd'), - (0x1D46C, 'M', 'e'), - (0x1D46D, 'M', 'f'), - (0x1D46E, 'M', 'g'), - (0x1D46F, 'M', 'h'), - (0x1D470, 'M', 'i'), - (0x1D471, 'M', 'j'), - (0x1D472, 'M', 'k'), - (0x1D473, 'M', 'l'), - (0x1D474, 'M', 'm'), - (0x1D475, 'M', 'n'), - (0x1D476, 'M', 'o'), - (0x1D477, 'M', 'p'), - (0x1D478, 'M', 'q'), - (0x1D479, 'M', 'r'), - (0x1D47A, 'M', 's'), - (0x1D47B, 'M', 't'), - (0x1D47C, 'M', 'u'), - (0x1D47D, 'M', 'v'), - (0x1D47E, 'M', 'w'), - (0x1D47F, 'M', 'x'), - (0x1D480, 'M', 'y'), - (0x1D481, 'M', 'z'), - (0x1D482, 'M', 'a'), - (0x1D483, 'M', 'b'), - (0x1D484, 'M', 'c'), - ] - -def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D485, 'M', 'd'), - (0x1D486, 'M', 'e'), - (0x1D487, 'M', 'f'), - (0x1D488, 'M', 'g'), - (0x1D489, 'M', 'h'), - (0x1D48A, 'M', 'i'), - (0x1D48B, 'M', 'j'), - (0x1D48C, 'M', 'k'), - (0x1D48D, 'M', 'l'), - (0x1D48E, 'M', 'm'), - (0x1D48F, 'M', 'n'), - (0x1D490, 'M', 'o'), - (0x1D491, 'M', 'p'), - (0x1D492, 'M', 'q'), - (0x1D493, 'M', 'r'), - (0x1D494, 'M', 's'), - (0x1D495, 'M', 't'), - (0x1D496, 'M', 'u'), - (0x1D497, 'M', 'v'), - (0x1D498, 'M', 'w'), - (0x1D499, 'M', 'x'), - (0x1D49A, 'M', 'y'), - (0x1D49B, 'M', 'z'), - (0x1D49C, 'M', 'a'), - (0x1D49D, 'X'), - (0x1D49E, 'M', 'c'), - (0x1D49F, 'M', 'd'), - (0x1D4A0, 'X'), - (0x1D4A2, 'M', 'g'), - (0x1D4A3, 'X'), - (0x1D4A5, 'M', 'j'), - (0x1D4A6, 'M', 'k'), - (0x1D4A7, 'X'), - (0x1D4A9, 'M', 'n'), - (0x1D4AA, 'M', 'o'), - (0x1D4AB, 'M', 'p'), - (0x1D4AC, 'M', 'q'), - (0x1D4AD, 'X'), - (0x1D4AE, 'M', 's'), - (0x1D4AF, 'M', 't'), - (0x1D4B0, 'M', 'u'), - (0x1D4B1, 'M', 'v'), - (0x1D4B2, 'M', 'w'), - (0x1D4B3, 'M', 'x'), - (0x1D4B4, 'M', 'y'), - (0x1D4B5, 'M', 'z'), - (0x1D4B6, 'M', 'a'), - (0x1D4B7, 'M', 'b'), - (0x1D4B8, 'M', 'c'), - (0x1D4B9, 'M', 'd'), - (0x1D4BA, 'X'), - (0x1D4BB, 'M', 'f'), - (0x1D4BC, 'X'), - (0x1D4BD, 'M', 'h'), - (0x1D4BE, 'M', 'i'), - (0x1D4BF, 'M', 'j'), - (0x1D4C0, 'M', 'k'), - (0x1D4C1, 'M', 'l'), - (0x1D4C2, 'M', 'm'), - (0x1D4C3, 'M', 'n'), - (0x1D4C4, 'X'), - (0x1D4C5, 'M', 'p'), - (0x1D4C6, 'M', 'q'), - (0x1D4C7, 'M', 'r'), - (0x1D4C8, 'M', 's'), - (0x1D4C9, 'M', 't'), - (0x1D4CA, 'M', 'u'), - (0x1D4CB, 'M', 'v'), - (0x1D4CC, 'M', 'w'), - (0x1D4CD, 'M', 'x'), - (0x1D4CE, 'M', 'y'), - (0x1D4CF, 'M', 'z'), - (0x1D4D0, 'M', 'a'), - (0x1D4D1, 'M', 'b'), - (0x1D4D2, 'M', 'c'), - 
(0x1D4D3, 'M', 'd'), - (0x1D4D4, 'M', 'e'), - (0x1D4D5, 'M', 'f'), - (0x1D4D6, 'M', 'g'), - (0x1D4D7, 'M', 'h'), - (0x1D4D8, 'M', 'i'), - (0x1D4D9, 'M', 'j'), - (0x1D4DA, 'M', 'k'), - (0x1D4DB, 'M', 'l'), - (0x1D4DC, 'M', 'm'), - (0x1D4DD, 'M', 'n'), - (0x1D4DE, 'M', 'o'), - (0x1D4DF, 'M', 'p'), - (0x1D4E0, 'M', 'q'), - (0x1D4E1, 'M', 'r'), - (0x1D4E2, 'M', 's'), - (0x1D4E3, 'M', 't'), - (0x1D4E4, 'M', 'u'), - (0x1D4E5, 'M', 'v'), - (0x1D4E6, 'M', 'w'), - (0x1D4E7, 'M', 'x'), - (0x1D4E8, 'M', 'y'), - (0x1D4E9, 'M', 'z'), - (0x1D4EA, 'M', 'a'), - (0x1D4EB, 'M', 'b'), - ] - -def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D4EC, 'M', 'c'), - (0x1D4ED, 'M', 'd'), - (0x1D4EE, 'M', 'e'), - (0x1D4EF, 'M', 'f'), - (0x1D4F0, 'M', 'g'), - (0x1D4F1, 'M', 'h'), - (0x1D4F2, 'M', 'i'), - (0x1D4F3, 'M', 'j'), - (0x1D4F4, 'M', 'k'), - (0x1D4F5, 'M', 'l'), - (0x1D4F6, 'M', 'm'), - (0x1D4F7, 'M', 'n'), - (0x1D4F8, 'M', 'o'), - (0x1D4F9, 'M', 'p'), - (0x1D4FA, 'M', 'q'), - (0x1D4FB, 'M', 'r'), - (0x1D4FC, 'M', 's'), - (0x1D4FD, 'M', 't'), - (0x1D4FE, 'M', 'u'), - (0x1D4FF, 'M', 'v'), - (0x1D500, 'M', 'w'), - (0x1D501, 'M', 'x'), - (0x1D502, 'M', 'y'), - (0x1D503, 'M', 'z'), - (0x1D504, 'M', 'a'), - (0x1D505, 'M', 'b'), - (0x1D506, 'X'), - (0x1D507, 'M', 'd'), - (0x1D508, 'M', 'e'), - (0x1D509, 'M', 'f'), - (0x1D50A, 'M', 'g'), - (0x1D50B, 'X'), - (0x1D50D, 'M', 'j'), - (0x1D50E, 'M', 'k'), - (0x1D50F, 'M', 'l'), - (0x1D510, 'M', 'm'), - (0x1D511, 'M', 'n'), - (0x1D512, 'M', 'o'), - (0x1D513, 'M', 'p'), - (0x1D514, 'M', 'q'), - (0x1D515, 'X'), - (0x1D516, 'M', 's'), - (0x1D517, 'M', 't'), - (0x1D518, 'M', 'u'), - (0x1D519, 'M', 'v'), - (0x1D51A, 'M', 'w'), - (0x1D51B, 'M', 'x'), - (0x1D51C, 'M', 'y'), - (0x1D51D, 'X'), - (0x1D51E, 'M', 'a'), - (0x1D51F, 'M', 'b'), - (0x1D520, 'M', 'c'), - (0x1D521, 'M', 'd'), - (0x1D522, 'M', 'e'), - (0x1D523, 'M', 'f'), - (0x1D524, 'M', 'g'), - (0x1D525, 'M', 'h'), - (0x1D526, 'M', 'i'), - (0x1D527, 'M', 'j'), - (0x1D528, 'M', 'k'), - (0x1D529, 'M', 'l'), - (0x1D52A, 'M', 'm'), - (0x1D52B, 'M', 'n'), - (0x1D52C, 'M', 'o'), - (0x1D52D, 'M', 'p'), - (0x1D52E, 'M', 'q'), - (0x1D52F, 'M', 'r'), - (0x1D530, 'M', 's'), - (0x1D531, 'M', 't'), - (0x1D532, 'M', 'u'), - (0x1D533, 'M', 'v'), - (0x1D534, 'M', 'w'), - (0x1D535, 'M', 'x'), - (0x1D536, 'M', 'y'), - (0x1D537, 'M', 'z'), - (0x1D538, 'M', 'a'), - (0x1D539, 'M', 'b'), - (0x1D53A, 'X'), - (0x1D53B, 'M', 'd'), - (0x1D53C, 'M', 'e'), - (0x1D53D, 'M', 'f'), - (0x1D53E, 'M', 'g'), - (0x1D53F, 'X'), - (0x1D540, 'M', 'i'), - (0x1D541, 'M', 'j'), - (0x1D542, 'M', 'k'), - (0x1D543, 'M', 'l'), - (0x1D544, 'M', 'm'), - (0x1D545, 'X'), - (0x1D546, 'M', 'o'), - (0x1D547, 'X'), - (0x1D54A, 'M', 's'), - (0x1D54B, 'M', 't'), - (0x1D54C, 'M', 'u'), - (0x1D54D, 'M', 'v'), - (0x1D54E, 'M', 'w'), - (0x1D54F, 'M', 'x'), - (0x1D550, 'M', 'y'), - (0x1D551, 'X'), - (0x1D552, 'M', 'a'), - ] - -def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D553, 'M', 'b'), - (0x1D554, 'M', 'c'), - (0x1D555, 'M', 'd'), - (0x1D556, 'M', 'e'), - (0x1D557, 'M', 'f'), - (0x1D558, 'M', 'g'), - (0x1D559, 'M', 'h'), - (0x1D55A, 'M', 'i'), - (0x1D55B, 'M', 'j'), - (0x1D55C, 'M', 'k'), - (0x1D55D, 'M', 'l'), - (0x1D55E, 'M', 'm'), - (0x1D55F, 'M', 'n'), - (0x1D560, 'M', 'o'), - (0x1D561, 'M', 'p'), - (0x1D562, 'M', 'q'), - (0x1D563, 'M', 'r'), - (0x1D564, 'M', 's'), - (0x1D565, 'M', 't'), - (0x1D566, 'M', 'u'), - (0x1D567, 'M', 'v'), - (0x1D568, 'M', 'w'), - (0x1D569, 'M', 'x'), - (0x1D56A, 'M', 'y'), 
- (0x1D56B, 'M', 'z'), - (0x1D56C, 'M', 'a'), - (0x1D56D, 'M', 'b'), - (0x1D56E, 'M', 'c'), - (0x1D56F, 'M', 'd'), - (0x1D570, 'M', 'e'), - (0x1D571, 'M', 'f'), - (0x1D572, 'M', 'g'), - (0x1D573, 'M', 'h'), - (0x1D574, 'M', 'i'), - (0x1D575, 'M', 'j'), - (0x1D576, 'M', 'k'), - (0x1D577, 'M', 'l'), - (0x1D578, 'M', 'm'), - (0x1D579, 'M', 'n'), - (0x1D57A, 'M', 'o'), - (0x1D57B, 'M', 'p'), - (0x1D57C, 'M', 'q'), - (0x1D57D, 'M', 'r'), - (0x1D57E, 'M', 's'), - (0x1D57F, 'M', 't'), - (0x1D580, 'M', 'u'), - (0x1D581, 'M', 'v'), - (0x1D582, 'M', 'w'), - (0x1D583, 'M', 'x'), - (0x1D584, 'M', 'y'), - (0x1D585, 'M', 'z'), - (0x1D586, 'M', 'a'), - (0x1D587, 'M', 'b'), - (0x1D588, 'M', 'c'), - (0x1D589, 'M', 'd'), - (0x1D58A, 'M', 'e'), - (0x1D58B, 'M', 'f'), - (0x1D58C, 'M', 'g'), - (0x1D58D, 'M', 'h'), - (0x1D58E, 'M', 'i'), - (0x1D58F, 'M', 'j'), - (0x1D590, 'M', 'k'), - (0x1D591, 'M', 'l'), - (0x1D592, 'M', 'm'), - (0x1D593, 'M', 'n'), - (0x1D594, 'M', 'o'), - (0x1D595, 'M', 'p'), - (0x1D596, 'M', 'q'), - (0x1D597, 'M', 'r'), - (0x1D598, 'M', 's'), - (0x1D599, 'M', 't'), - (0x1D59A, 'M', 'u'), - (0x1D59B, 'M', 'v'), - (0x1D59C, 'M', 'w'), - (0x1D59D, 'M', 'x'), - (0x1D59E, 'M', 'y'), - (0x1D59F, 'M', 'z'), - (0x1D5A0, 'M', 'a'), - (0x1D5A1, 'M', 'b'), - (0x1D5A2, 'M', 'c'), - (0x1D5A3, 'M', 'd'), - (0x1D5A4, 'M', 'e'), - (0x1D5A5, 'M', 'f'), - (0x1D5A6, 'M', 'g'), - (0x1D5A7, 'M', 'h'), - (0x1D5A8, 'M', 'i'), - (0x1D5A9, 'M', 'j'), - (0x1D5AA, 'M', 'k'), - (0x1D5AB, 'M', 'l'), - (0x1D5AC, 'M', 'm'), - (0x1D5AD, 'M', 'n'), - (0x1D5AE, 'M', 'o'), - (0x1D5AF, 'M', 'p'), - (0x1D5B0, 'M', 'q'), - (0x1D5B1, 'M', 'r'), - (0x1D5B2, 'M', 's'), - (0x1D5B3, 'M', 't'), - (0x1D5B4, 'M', 'u'), - (0x1D5B5, 'M', 'v'), - (0x1D5B6, 'M', 'w'), - ] - -def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D5B7, 'M', 'x'), - (0x1D5B8, 'M', 'y'), - (0x1D5B9, 'M', 'z'), - (0x1D5BA, 'M', 'a'), - (0x1D5BB, 'M', 'b'), - (0x1D5BC, 'M', 'c'), - (0x1D5BD, 'M', 'd'), - (0x1D5BE, 'M', 'e'), - (0x1D5BF, 'M', 'f'), - (0x1D5C0, 'M', 'g'), - (0x1D5C1, 'M', 'h'), - (0x1D5C2, 'M', 'i'), - (0x1D5C3, 'M', 'j'), - (0x1D5C4, 'M', 'k'), - (0x1D5C5, 'M', 'l'), - (0x1D5C6, 'M', 'm'), - (0x1D5C7, 'M', 'n'), - (0x1D5C8, 'M', 'o'), - (0x1D5C9, 'M', 'p'), - (0x1D5CA, 'M', 'q'), - (0x1D5CB, 'M', 'r'), - (0x1D5CC, 'M', 's'), - (0x1D5CD, 'M', 't'), - (0x1D5CE, 'M', 'u'), - (0x1D5CF, 'M', 'v'), - (0x1D5D0, 'M', 'w'), - (0x1D5D1, 'M', 'x'), - (0x1D5D2, 'M', 'y'), - (0x1D5D3, 'M', 'z'), - (0x1D5D4, 'M', 'a'), - (0x1D5D5, 'M', 'b'), - (0x1D5D6, 'M', 'c'), - (0x1D5D7, 'M', 'd'), - (0x1D5D8, 'M', 'e'), - (0x1D5D9, 'M', 'f'), - (0x1D5DA, 'M', 'g'), - (0x1D5DB, 'M', 'h'), - (0x1D5DC, 'M', 'i'), - (0x1D5DD, 'M', 'j'), - (0x1D5DE, 'M', 'k'), - (0x1D5DF, 'M', 'l'), - (0x1D5E0, 'M', 'm'), - (0x1D5E1, 'M', 'n'), - (0x1D5E2, 'M', 'o'), - (0x1D5E3, 'M', 'p'), - (0x1D5E4, 'M', 'q'), - (0x1D5E5, 'M', 'r'), - (0x1D5E6, 'M', 's'), - (0x1D5E7, 'M', 't'), - (0x1D5E8, 'M', 'u'), - (0x1D5E9, 'M', 'v'), - (0x1D5EA, 'M', 'w'), - (0x1D5EB, 'M', 'x'), - (0x1D5EC, 'M', 'y'), - (0x1D5ED, 'M', 'z'), - (0x1D5EE, 'M', 'a'), - (0x1D5EF, 'M', 'b'), - (0x1D5F0, 'M', 'c'), - (0x1D5F1, 'M', 'd'), - (0x1D5F2, 'M', 'e'), - (0x1D5F3, 'M', 'f'), - (0x1D5F4, 'M', 'g'), - (0x1D5F5, 'M', 'h'), - (0x1D5F6, 'M', 'i'), - (0x1D5F7, 'M', 'j'), - (0x1D5F8, 'M', 'k'), - (0x1D5F9, 'M', 'l'), - (0x1D5FA, 'M', 'm'), - (0x1D5FB, 'M', 'n'), - (0x1D5FC, 'M', 'o'), - (0x1D5FD, 'M', 'p'), - (0x1D5FE, 'M', 'q'), - (0x1D5FF, 'M', 'r'), - (0x1D600, 'M', 's'), - (0x1D601, 'M', 
't'), - (0x1D602, 'M', 'u'), - (0x1D603, 'M', 'v'), - (0x1D604, 'M', 'w'), - (0x1D605, 'M', 'x'), - (0x1D606, 'M', 'y'), - (0x1D607, 'M', 'z'), - (0x1D608, 'M', 'a'), - (0x1D609, 'M', 'b'), - (0x1D60A, 'M', 'c'), - (0x1D60B, 'M', 'd'), - (0x1D60C, 'M', 'e'), - (0x1D60D, 'M', 'f'), - (0x1D60E, 'M', 'g'), - (0x1D60F, 'M', 'h'), - (0x1D610, 'M', 'i'), - (0x1D611, 'M', 'j'), - (0x1D612, 'M', 'k'), - (0x1D613, 'M', 'l'), - (0x1D614, 'M', 'm'), - (0x1D615, 'M', 'n'), - (0x1D616, 'M', 'o'), - (0x1D617, 'M', 'p'), - (0x1D618, 'M', 'q'), - (0x1D619, 'M', 'r'), - (0x1D61A, 'M', 's'), - ] - -def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D61B, 'M', 't'), - (0x1D61C, 'M', 'u'), - (0x1D61D, 'M', 'v'), - (0x1D61E, 'M', 'w'), - (0x1D61F, 'M', 'x'), - (0x1D620, 'M', 'y'), - (0x1D621, 'M', 'z'), - (0x1D622, 'M', 'a'), - (0x1D623, 'M', 'b'), - (0x1D624, 'M', 'c'), - (0x1D625, 'M', 'd'), - (0x1D626, 'M', 'e'), - (0x1D627, 'M', 'f'), - (0x1D628, 'M', 'g'), - (0x1D629, 'M', 'h'), - (0x1D62A, 'M', 'i'), - (0x1D62B, 'M', 'j'), - (0x1D62C, 'M', 'k'), - (0x1D62D, 'M', 'l'), - (0x1D62E, 'M', 'm'), - (0x1D62F, 'M', 'n'), - (0x1D630, 'M', 'o'), - (0x1D631, 'M', 'p'), - (0x1D632, 'M', 'q'), - (0x1D633, 'M', 'r'), - (0x1D634, 'M', 's'), - (0x1D635, 'M', 't'), - (0x1D636, 'M', 'u'), - (0x1D637, 'M', 'v'), - (0x1D638, 'M', 'w'), - (0x1D639, 'M', 'x'), - (0x1D63A, 'M', 'y'), - (0x1D63B, 'M', 'z'), - (0x1D63C, 'M', 'a'), - (0x1D63D, 'M', 'b'), - (0x1D63E, 'M', 'c'), - (0x1D63F, 'M', 'd'), - (0x1D640, 'M', 'e'), - (0x1D641, 'M', 'f'), - (0x1D642, 'M', 'g'), - (0x1D643, 'M', 'h'), - (0x1D644, 'M', 'i'), - (0x1D645, 'M', 'j'), - (0x1D646, 'M', 'k'), - (0x1D647, 'M', 'l'), - (0x1D648, 'M', 'm'), - (0x1D649, 'M', 'n'), - (0x1D64A, 'M', 'o'), - (0x1D64B, 'M', 'p'), - (0x1D64C, 'M', 'q'), - (0x1D64D, 'M', 'r'), - (0x1D64E, 'M', 's'), - (0x1D64F, 'M', 't'), - (0x1D650, 'M', 'u'), - (0x1D651, 'M', 'v'), - (0x1D652, 'M', 'w'), - (0x1D653, 'M', 'x'), - (0x1D654, 'M', 'y'), - (0x1D655, 'M', 'z'), - (0x1D656, 'M', 'a'), - (0x1D657, 'M', 'b'), - (0x1D658, 'M', 'c'), - (0x1D659, 'M', 'd'), - (0x1D65A, 'M', 'e'), - (0x1D65B, 'M', 'f'), - (0x1D65C, 'M', 'g'), - (0x1D65D, 'M', 'h'), - (0x1D65E, 'M', 'i'), - (0x1D65F, 'M', 'j'), - (0x1D660, 'M', 'k'), - (0x1D661, 'M', 'l'), - (0x1D662, 'M', 'm'), - (0x1D663, 'M', 'n'), - (0x1D664, 'M', 'o'), - (0x1D665, 'M', 'p'), - (0x1D666, 'M', 'q'), - (0x1D667, 'M', 'r'), - (0x1D668, 'M', 's'), - (0x1D669, 'M', 't'), - (0x1D66A, 'M', 'u'), - (0x1D66B, 'M', 'v'), - (0x1D66C, 'M', 'w'), - (0x1D66D, 'M', 'x'), - (0x1D66E, 'M', 'y'), - (0x1D66F, 'M', 'z'), - (0x1D670, 'M', 'a'), - (0x1D671, 'M', 'b'), - (0x1D672, 'M', 'c'), - (0x1D673, 'M', 'd'), - (0x1D674, 'M', 'e'), - (0x1D675, 'M', 'f'), - (0x1D676, 'M', 'g'), - (0x1D677, 'M', 'h'), - (0x1D678, 'M', 'i'), - (0x1D679, 'M', 'j'), - (0x1D67A, 'M', 'k'), - (0x1D67B, 'M', 'l'), - (0x1D67C, 'M', 'm'), - (0x1D67D, 'M', 'n'), - (0x1D67E, 'M', 'o'), - ] - -def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D67F, 'M', 'p'), - (0x1D680, 'M', 'q'), - (0x1D681, 'M', 'r'), - (0x1D682, 'M', 's'), - (0x1D683, 'M', 't'), - (0x1D684, 'M', 'u'), - (0x1D685, 'M', 'v'), - (0x1D686, 'M', 'w'), - (0x1D687, 'M', 'x'), - (0x1D688, 'M', 'y'), - (0x1D689, 'M', 'z'), - (0x1D68A, 'M', 'a'), - (0x1D68B, 'M', 'b'), - (0x1D68C, 'M', 'c'), - (0x1D68D, 'M', 'd'), - (0x1D68E, 'M', 'e'), - (0x1D68F, 'M', 'f'), - (0x1D690, 'M', 'g'), - (0x1D691, 'M', 'h'), - (0x1D692, 'M', 'i'), - (0x1D693, 'M', 'j'), - (0x1D694, 'M', 
'k'), - (0x1D695, 'M', 'l'), - (0x1D696, 'M', 'm'), - (0x1D697, 'M', 'n'), - (0x1D698, 'M', 'o'), - (0x1D699, 'M', 'p'), - (0x1D69A, 'M', 'q'), - (0x1D69B, 'M', 'r'), - (0x1D69C, 'M', 's'), - (0x1D69D, 'M', 't'), - (0x1D69E, 'M', 'u'), - (0x1D69F, 'M', 'v'), - (0x1D6A0, 'M', 'w'), - (0x1D6A1, 'M', 'x'), - (0x1D6A2, 'M', 'y'), - (0x1D6A3, 'M', 'z'), - (0x1D6A4, 'M', 'ı'), - (0x1D6A5, 'M', 'ȷ'), - (0x1D6A6, 'X'), - (0x1D6A8, 'M', 'α'), - (0x1D6A9, 'M', 'β'), - (0x1D6AA, 'M', 'γ'), - (0x1D6AB, 'M', 'δ'), - (0x1D6AC, 'M', 'ε'), - (0x1D6AD, 'M', 'ζ'), - (0x1D6AE, 'M', 'η'), - (0x1D6AF, 'M', 'θ'), - (0x1D6B0, 'M', 'ι'), - (0x1D6B1, 'M', 'κ'), - (0x1D6B2, 'M', 'λ'), - (0x1D6B3, 'M', 'μ'), - (0x1D6B4, 'M', 'ν'), - (0x1D6B5, 'M', 'ξ'), - (0x1D6B6, 'M', 'ο'), - (0x1D6B7, 'M', 'π'), - (0x1D6B8, 'M', 'ρ'), - (0x1D6B9, 'M', 'θ'), - (0x1D6BA, 'M', 'σ'), - (0x1D6BB, 'M', 'τ'), - (0x1D6BC, 'M', 'υ'), - (0x1D6BD, 'M', 'φ'), - (0x1D6BE, 'M', 'χ'), - (0x1D6BF, 'M', 'ψ'), - (0x1D6C0, 'M', 'ω'), - (0x1D6C1, 'M', '∇'), - (0x1D6C2, 'M', 'α'), - (0x1D6C3, 'M', 'β'), - (0x1D6C4, 'M', 'γ'), - (0x1D6C5, 'M', 'δ'), - (0x1D6C6, 'M', 'ε'), - (0x1D6C7, 'M', 'ζ'), - (0x1D6C8, 'M', 'η'), - (0x1D6C9, 'M', 'θ'), - (0x1D6CA, 'M', 'ι'), - (0x1D6CB, 'M', 'κ'), - (0x1D6CC, 'M', 'λ'), - (0x1D6CD, 'M', 'μ'), - (0x1D6CE, 'M', 'ν'), - (0x1D6CF, 'M', 'ξ'), - (0x1D6D0, 'M', 'ο'), - (0x1D6D1, 'M', 'π'), - (0x1D6D2, 'M', 'ρ'), - (0x1D6D3, 'M', 'σ'), - (0x1D6D5, 'M', 'τ'), - (0x1D6D6, 'M', 'υ'), - (0x1D6D7, 'M', 'φ'), - (0x1D6D8, 'M', 'χ'), - (0x1D6D9, 'M', 'ψ'), - (0x1D6DA, 'M', 'ω'), - (0x1D6DB, 'M', '∂'), - (0x1D6DC, 'M', 'ε'), - (0x1D6DD, 'M', 'θ'), - (0x1D6DE, 'M', 'κ'), - (0x1D6DF, 'M', 'φ'), - (0x1D6E0, 'M', 'ρ'), - (0x1D6E1, 'M', 'π'), - (0x1D6E2, 'M', 'α'), - (0x1D6E3, 'M', 'β'), - (0x1D6E4, 'M', 'γ'), - ] - -def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D6E5, 'M', 'δ'), - (0x1D6E6, 'M', 'ε'), - (0x1D6E7, 'M', 'ζ'), - (0x1D6E8, 'M', 'η'), - (0x1D6E9, 'M', 'θ'), - (0x1D6EA, 'M', 'ι'), - (0x1D6EB, 'M', 'κ'), - (0x1D6EC, 'M', 'λ'), - (0x1D6ED, 'M', 'μ'), - (0x1D6EE, 'M', 'ν'), - (0x1D6EF, 'M', 'ξ'), - (0x1D6F0, 'M', 'ο'), - (0x1D6F1, 'M', 'π'), - (0x1D6F2, 'M', 'ρ'), - (0x1D6F3, 'M', 'θ'), - (0x1D6F4, 'M', 'σ'), - (0x1D6F5, 'M', 'τ'), - (0x1D6F6, 'M', 'υ'), - (0x1D6F7, 'M', 'φ'), - (0x1D6F8, 'M', 'χ'), - (0x1D6F9, 'M', 'ψ'), - (0x1D6FA, 'M', 'ω'), - (0x1D6FB, 'M', '∇'), - (0x1D6FC, 'M', 'α'), - (0x1D6FD, 'M', 'β'), - (0x1D6FE, 'M', 'γ'), - (0x1D6FF, 'M', 'δ'), - (0x1D700, 'M', 'ε'), - (0x1D701, 'M', 'ζ'), - (0x1D702, 'M', 'η'), - (0x1D703, 'M', 'θ'), - (0x1D704, 'M', 'ι'), - (0x1D705, 'M', 'κ'), - (0x1D706, 'M', 'λ'), - (0x1D707, 'M', 'μ'), - (0x1D708, 'M', 'ν'), - (0x1D709, 'M', 'ξ'), - (0x1D70A, 'M', 'ο'), - (0x1D70B, 'M', 'π'), - (0x1D70C, 'M', 'ρ'), - (0x1D70D, 'M', 'σ'), - (0x1D70F, 'M', 'τ'), - (0x1D710, 'M', 'υ'), - (0x1D711, 'M', 'φ'), - (0x1D712, 'M', 'χ'), - (0x1D713, 'M', 'ψ'), - (0x1D714, 'M', 'ω'), - (0x1D715, 'M', '∂'), - (0x1D716, 'M', 'ε'), - (0x1D717, 'M', 'θ'), - (0x1D718, 'M', 'κ'), - (0x1D719, 'M', 'φ'), - (0x1D71A, 'M', 'ρ'), - (0x1D71B, 'M', 'π'), - (0x1D71C, 'M', 'α'), - (0x1D71D, 'M', 'β'), - (0x1D71E, 'M', 'γ'), - (0x1D71F, 'M', 'δ'), - (0x1D720, 'M', 'ε'), - (0x1D721, 'M', 'ζ'), - (0x1D722, 'M', 'η'), - (0x1D723, 'M', 'θ'), - (0x1D724, 'M', 'ι'), - (0x1D725, 'M', 'κ'), - (0x1D726, 'M', 'λ'), - (0x1D727, 'M', 'μ'), - (0x1D728, 'M', 'ν'), - (0x1D729, 'M', 'ξ'), - (0x1D72A, 'M', 'ο'), - (0x1D72B, 'M', 'π'), - (0x1D72C, 'M', 'ρ'), - (0x1D72D, 'M', 'θ'), - (0x1D72E, 'M', 
'σ'), - (0x1D72F, 'M', 'τ'), - (0x1D730, 'M', 'υ'), - (0x1D731, 'M', 'φ'), - (0x1D732, 'M', 'χ'), - (0x1D733, 'M', 'ψ'), - (0x1D734, 'M', 'ω'), - (0x1D735, 'M', '∇'), - (0x1D736, 'M', 'α'), - (0x1D737, 'M', 'β'), - (0x1D738, 'M', 'γ'), - (0x1D739, 'M', 'δ'), - (0x1D73A, 'M', 'ε'), - (0x1D73B, 'M', 'ζ'), - (0x1D73C, 'M', 'η'), - (0x1D73D, 'M', 'θ'), - (0x1D73E, 'M', 'ι'), - (0x1D73F, 'M', 'κ'), - (0x1D740, 'M', 'λ'), - (0x1D741, 'M', 'μ'), - (0x1D742, 'M', 'ν'), - (0x1D743, 'M', 'ξ'), - (0x1D744, 'M', 'ο'), - (0x1D745, 'M', 'π'), - (0x1D746, 'M', 'ρ'), - (0x1D747, 'M', 'σ'), - (0x1D749, 'M', 'τ'), - (0x1D74A, 'M', 'υ'), - ] - -def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D74B, 'M', 'φ'), - (0x1D74C, 'M', 'χ'), - (0x1D74D, 'M', 'ψ'), - (0x1D74E, 'M', 'ω'), - (0x1D74F, 'M', '∂'), - (0x1D750, 'M', 'ε'), - (0x1D751, 'M', 'θ'), - (0x1D752, 'M', 'κ'), - (0x1D753, 'M', 'φ'), - (0x1D754, 'M', 'ρ'), - (0x1D755, 'M', 'π'), - (0x1D756, 'M', 'α'), - (0x1D757, 'M', 'β'), - (0x1D758, 'M', 'γ'), - (0x1D759, 'M', 'δ'), - (0x1D75A, 'M', 'ε'), - (0x1D75B, 'M', 'ζ'), - (0x1D75C, 'M', 'η'), - (0x1D75D, 'M', 'θ'), - (0x1D75E, 'M', 'ι'), - (0x1D75F, 'M', 'κ'), - (0x1D760, 'M', 'λ'), - (0x1D761, 'M', 'μ'), - (0x1D762, 'M', 'ν'), - (0x1D763, 'M', 'ξ'), - (0x1D764, 'M', 'ο'), - (0x1D765, 'M', 'π'), - (0x1D766, 'M', 'ρ'), - (0x1D767, 'M', 'θ'), - (0x1D768, 'M', 'σ'), - (0x1D769, 'M', 'τ'), - (0x1D76A, 'M', 'υ'), - (0x1D76B, 'M', 'φ'), - (0x1D76C, 'M', 'χ'), - (0x1D76D, 'M', 'ψ'), - (0x1D76E, 'M', 'ω'), - (0x1D76F, 'M', '∇'), - (0x1D770, 'M', 'α'), - (0x1D771, 'M', 'β'), - (0x1D772, 'M', 'γ'), - (0x1D773, 'M', 'δ'), - (0x1D774, 'M', 'ε'), - (0x1D775, 'M', 'ζ'), - (0x1D776, 'M', 'η'), - (0x1D777, 'M', 'θ'), - (0x1D778, 'M', 'ι'), - (0x1D779, 'M', 'κ'), - (0x1D77A, 'M', 'λ'), - (0x1D77B, 'M', 'μ'), - (0x1D77C, 'M', 'ν'), - (0x1D77D, 'M', 'ξ'), - (0x1D77E, 'M', 'ο'), - (0x1D77F, 'M', 'π'), - (0x1D780, 'M', 'ρ'), - (0x1D781, 'M', 'σ'), - (0x1D783, 'M', 'τ'), - (0x1D784, 'M', 'υ'), - (0x1D785, 'M', 'φ'), - (0x1D786, 'M', 'χ'), - (0x1D787, 'M', 'ψ'), - (0x1D788, 'M', 'ω'), - (0x1D789, 'M', '∂'), - (0x1D78A, 'M', 'ε'), - (0x1D78B, 'M', 'θ'), - (0x1D78C, 'M', 'κ'), - (0x1D78D, 'M', 'φ'), - (0x1D78E, 'M', 'ρ'), - (0x1D78F, 'M', 'π'), - (0x1D790, 'M', 'α'), - (0x1D791, 'M', 'β'), - (0x1D792, 'M', 'γ'), - (0x1D793, 'M', 'δ'), - (0x1D794, 'M', 'ε'), - (0x1D795, 'M', 'ζ'), - (0x1D796, 'M', 'η'), - (0x1D797, 'M', 'θ'), - (0x1D798, 'M', 'ι'), - (0x1D799, 'M', 'κ'), - (0x1D79A, 'M', 'λ'), - (0x1D79B, 'M', 'μ'), - (0x1D79C, 'M', 'ν'), - (0x1D79D, 'M', 'ξ'), - (0x1D79E, 'M', 'ο'), - (0x1D79F, 'M', 'π'), - (0x1D7A0, 'M', 'ρ'), - (0x1D7A1, 'M', 'θ'), - (0x1D7A2, 'M', 'σ'), - (0x1D7A3, 'M', 'τ'), - (0x1D7A4, 'M', 'υ'), - (0x1D7A5, 'M', 'φ'), - (0x1D7A6, 'M', 'χ'), - (0x1D7A7, 'M', 'ψ'), - (0x1D7A8, 'M', 'ω'), - (0x1D7A9, 'M', '∇'), - (0x1D7AA, 'M', 'α'), - (0x1D7AB, 'M', 'β'), - (0x1D7AC, 'M', 'γ'), - (0x1D7AD, 'M', 'δ'), - (0x1D7AE, 'M', 'ε'), - (0x1D7AF, 'M', 'ζ'), - ] - -def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1D7B0, 'M', 'η'), - (0x1D7B1, 'M', 'θ'), - (0x1D7B2, 'M', 'ι'), - (0x1D7B3, 'M', 'κ'), - (0x1D7B4, 'M', 'λ'), - (0x1D7B5, 'M', 'μ'), - (0x1D7B6, 'M', 'ν'), - (0x1D7B7, 'M', 'ξ'), - (0x1D7B8, 'M', 'ο'), - (0x1D7B9, 'M', 'π'), - (0x1D7BA, 'M', 'ρ'), - (0x1D7BB, 'M', 'σ'), - (0x1D7BD, 'M', 'τ'), - (0x1D7BE, 'M', 'υ'), - (0x1D7BF, 'M', 'φ'), - (0x1D7C0, 'M', 'χ'), - (0x1D7C1, 'M', 'ψ'), - (0x1D7C2, 'M', 'ω'), - (0x1D7C3, 'M', '∂'), - (0x1D7C4, 'M', 
'ε'), - (0x1D7C5, 'M', 'θ'), - (0x1D7C6, 'M', 'κ'), - (0x1D7C7, 'M', 'φ'), - (0x1D7C8, 'M', 'ρ'), - (0x1D7C9, 'M', 'π'), - (0x1D7CA, 'M', 'ϝ'), - (0x1D7CC, 'X'), - (0x1D7CE, 'M', '0'), - (0x1D7CF, 'M', '1'), - (0x1D7D0, 'M', '2'), - (0x1D7D1, 'M', '3'), - (0x1D7D2, 'M', '4'), - (0x1D7D3, 'M', '5'), - (0x1D7D4, 'M', '6'), - (0x1D7D5, 'M', '7'), - (0x1D7D6, 'M', '8'), - (0x1D7D7, 'M', '9'), - (0x1D7D8, 'M', '0'), - (0x1D7D9, 'M', '1'), - (0x1D7DA, 'M', '2'), - (0x1D7DB, 'M', '3'), - (0x1D7DC, 'M', '4'), - (0x1D7DD, 'M', '5'), - (0x1D7DE, 'M', '6'), - (0x1D7DF, 'M', '7'), - (0x1D7E0, 'M', '8'), - (0x1D7E1, 'M', '9'), - (0x1D7E2, 'M', '0'), - (0x1D7E3, 'M', '1'), - (0x1D7E4, 'M', '2'), - (0x1D7E5, 'M', '3'), - (0x1D7E6, 'M', '4'), - (0x1D7E7, 'M', '5'), - (0x1D7E8, 'M', '6'), - (0x1D7E9, 'M', '7'), - (0x1D7EA, 'M', '8'), - (0x1D7EB, 'M', '9'), - (0x1D7EC, 'M', '0'), - (0x1D7ED, 'M', '1'), - (0x1D7EE, 'M', '2'), - (0x1D7EF, 'M', '3'), - (0x1D7F0, 'M', '4'), - (0x1D7F1, 'M', '5'), - (0x1D7F2, 'M', '6'), - (0x1D7F3, 'M', '7'), - (0x1D7F4, 'M', '8'), - (0x1D7F5, 'M', '9'), - (0x1D7F6, 'M', '0'), - (0x1D7F7, 'M', '1'), - (0x1D7F8, 'M', '2'), - (0x1D7F9, 'M', '3'), - (0x1D7FA, 'M', '4'), - (0x1D7FB, 'M', '5'), - (0x1D7FC, 'M', '6'), - (0x1D7FD, 'M', '7'), - (0x1D7FE, 'M', '8'), - (0x1D7FF, 'M', '9'), - (0x1D800, 'V'), - (0x1DA8C, 'X'), - (0x1DA9B, 'V'), - (0x1DAA0, 'X'), - (0x1DAA1, 'V'), - (0x1DAB0, 'X'), - (0x1DF00, 'V'), - (0x1DF1F, 'X'), - (0x1DF25, 'V'), - (0x1DF2B, 'X'), - (0x1E000, 'V'), - (0x1E007, 'X'), - (0x1E008, 'V'), - (0x1E019, 'X'), - (0x1E01B, 'V'), - (0x1E022, 'X'), - (0x1E023, 'V'), - (0x1E025, 'X'), - (0x1E026, 'V'), - (0x1E02B, 'X'), - (0x1E030, 'M', 'а'), - (0x1E031, 'M', 'б'), - (0x1E032, 'M', 'в'), - ] - -def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E033, 'M', 'г'), - (0x1E034, 'M', 'д'), - (0x1E035, 'M', 'е'), - (0x1E036, 'M', 'ж'), - (0x1E037, 'M', 'з'), - (0x1E038, 'M', 'и'), - (0x1E039, 'M', 'к'), - (0x1E03A, 'M', 'л'), - (0x1E03B, 'M', 'м'), - (0x1E03C, 'M', 'о'), - (0x1E03D, 'M', 'п'), - (0x1E03E, 'M', 'р'), - (0x1E03F, 'M', 'с'), - (0x1E040, 'M', 'т'), - (0x1E041, 'M', 'у'), - (0x1E042, 'M', 'ф'), - (0x1E043, 'M', 'х'), - (0x1E044, 'M', 'ц'), - (0x1E045, 'M', 'ч'), - (0x1E046, 'M', 'ш'), - (0x1E047, 'M', 'ы'), - (0x1E048, 'M', 'э'), - (0x1E049, 'M', 'ю'), - (0x1E04A, 'M', 'ꚉ'), - (0x1E04B, 'M', 'ә'), - (0x1E04C, 'M', 'і'), - (0x1E04D, 'M', 'ј'), - (0x1E04E, 'M', 'ө'), - (0x1E04F, 'M', 'ү'), - (0x1E050, 'M', 'ӏ'), - (0x1E051, 'M', 'а'), - (0x1E052, 'M', 'б'), - (0x1E053, 'M', 'в'), - (0x1E054, 'M', 'г'), - (0x1E055, 'M', 'д'), - (0x1E056, 'M', 'е'), - (0x1E057, 'M', 'ж'), - (0x1E058, 'M', 'з'), - (0x1E059, 'M', 'и'), - (0x1E05A, 'M', 'к'), - (0x1E05B, 'M', 'л'), - (0x1E05C, 'M', 'о'), - (0x1E05D, 'M', 'п'), - (0x1E05E, 'M', 'с'), - (0x1E05F, 'M', 'у'), - (0x1E060, 'M', 'ф'), - (0x1E061, 'M', 'х'), - (0x1E062, 'M', 'ц'), - (0x1E063, 'M', 'ч'), - (0x1E064, 'M', 'ш'), - (0x1E065, 'M', 'ъ'), - (0x1E066, 'M', 'ы'), - (0x1E067, 'M', 'ґ'), - (0x1E068, 'M', 'і'), - (0x1E069, 'M', 'ѕ'), - (0x1E06A, 'M', 'џ'), - (0x1E06B, 'M', 'ҫ'), - (0x1E06C, 'M', 'ꙑ'), - (0x1E06D, 'M', 'ұ'), - (0x1E06E, 'X'), - (0x1E08F, 'V'), - (0x1E090, 'X'), - (0x1E100, 'V'), - (0x1E12D, 'X'), - (0x1E130, 'V'), - (0x1E13E, 'X'), - (0x1E140, 'V'), - (0x1E14A, 'X'), - (0x1E14E, 'V'), - (0x1E150, 'X'), - (0x1E290, 'V'), - (0x1E2AF, 'X'), - (0x1E2C0, 'V'), - (0x1E2FA, 'X'), - (0x1E2FF, 'V'), - (0x1E300, 'X'), - (0x1E4D0, 'V'), - (0x1E4FA, 'X'), - (0x1E7E0, 'V'), - 
(0x1E7E7, 'X'), - (0x1E7E8, 'V'), - (0x1E7EC, 'X'), - (0x1E7ED, 'V'), - (0x1E7EF, 'X'), - (0x1E7F0, 'V'), - (0x1E7FF, 'X'), - (0x1E800, 'V'), - (0x1E8C5, 'X'), - (0x1E8C7, 'V'), - (0x1E8D7, 'X'), - (0x1E900, 'M', '𞤢'), - (0x1E901, 'M', '𞤣'), - (0x1E902, 'M', '𞤤'), - (0x1E903, 'M', '𞤥'), - (0x1E904, 'M', '𞤦'), - (0x1E905, 'M', '𞤧'), - (0x1E906, 'M', '𞤨'), - (0x1E907, 'M', '𞤩'), - (0x1E908, 'M', '𞤪'), - (0x1E909, 'M', '𞤫'), - ] - -def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1E90A, 'M', '𞤬'), - (0x1E90B, 'M', '𞤭'), - (0x1E90C, 'M', '𞤮'), - (0x1E90D, 'M', '𞤯'), - (0x1E90E, 'M', '𞤰'), - (0x1E90F, 'M', '𞤱'), - (0x1E910, 'M', '𞤲'), - (0x1E911, 'M', '𞤳'), - (0x1E912, 'M', '𞤴'), - (0x1E913, 'M', '𞤵'), - (0x1E914, 'M', '𞤶'), - (0x1E915, 'M', '𞤷'), - (0x1E916, 'M', '𞤸'), - (0x1E917, 'M', '𞤹'), - (0x1E918, 'M', '𞤺'), - (0x1E919, 'M', '𞤻'), - (0x1E91A, 'M', '𞤼'), - (0x1E91B, 'M', '𞤽'), - (0x1E91C, 'M', '𞤾'), - (0x1E91D, 'M', '𞤿'), - (0x1E91E, 'M', '𞥀'), - (0x1E91F, 'M', '𞥁'), - (0x1E920, 'M', '𞥂'), - (0x1E921, 'M', '𞥃'), - (0x1E922, 'V'), - (0x1E94C, 'X'), - (0x1E950, 'V'), - (0x1E95A, 'X'), - (0x1E95E, 'V'), - (0x1E960, 'X'), - (0x1EC71, 'V'), - (0x1ECB5, 'X'), - (0x1ED01, 'V'), - (0x1ED3E, 'X'), - (0x1EE00, 'M', 'ا'), - (0x1EE01, 'M', 'ب'), - (0x1EE02, 'M', 'ج'), - (0x1EE03, 'M', 'د'), - (0x1EE04, 'X'), - (0x1EE05, 'M', 'و'), - (0x1EE06, 'M', 'ز'), - (0x1EE07, 'M', 'ح'), - (0x1EE08, 'M', 'ط'), - (0x1EE09, 'M', 'ي'), - (0x1EE0A, 'M', 'ك'), - (0x1EE0B, 'M', 'ل'), - (0x1EE0C, 'M', 'م'), - (0x1EE0D, 'M', 'ن'), - (0x1EE0E, 'M', 'س'), - (0x1EE0F, 'M', 'ع'), - (0x1EE10, 'M', 'ف'), - (0x1EE11, 'M', 'ص'), - (0x1EE12, 'M', 'ق'), - (0x1EE13, 'M', 'ر'), - (0x1EE14, 'M', 'ش'), - (0x1EE15, 'M', 'ت'), - (0x1EE16, 'M', 'ث'), - (0x1EE17, 'M', 'خ'), - (0x1EE18, 'M', 'ذ'), - (0x1EE19, 'M', 'ض'), - (0x1EE1A, 'M', 'ظ'), - (0x1EE1B, 'M', 'غ'), - (0x1EE1C, 'M', 'ٮ'), - (0x1EE1D, 'M', 'ں'), - (0x1EE1E, 'M', 'ڡ'), - (0x1EE1F, 'M', 'ٯ'), - (0x1EE20, 'X'), - (0x1EE21, 'M', 'ب'), - (0x1EE22, 'M', 'ج'), - (0x1EE23, 'X'), - (0x1EE24, 'M', 'ه'), - (0x1EE25, 'X'), - (0x1EE27, 'M', 'ح'), - (0x1EE28, 'X'), - (0x1EE29, 'M', 'ي'), - (0x1EE2A, 'M', 'ك'), - (0x1EE2B, 'M', 'ل'), - (0x1EE2C, 'M', 'م'), - (0x1EE2D, 'M', 'ن'), - (0x1EE2E, 'M', 'س'), - (0x1EE2F, 'M', 'ع'), - (0x1EE30, 'M', 'ف'), - (0x1EE31, 'M', 'ص'), - (0x1EE32, 'M', 'ق'), - (0x1EE33, 'X'), - (0x1EE34, 'M', 'ش'), - (0x1EE35, 'M', 'ت'), - (0x1EE36, 'M', 'ث'), - (0x1EE37, 'M', 'خ'), - (0x1EE38, 'X'), - (0x1EE39, 'M', 'ض'), - (0x1EE3A, 'X'), - (0x1EE3B, 'M', 'غ'), - (0x1EE3C, 'X'), - (0x1EE42, 'M', 'ج'), - (0x1EE43, 'X'), - (0x1EE47, 'M', 'ح'), - (0x1EE48, 'X'), - (0x1EE49, 'M', 'ي'), - (0x1EE4A, 'X'), - ] - -def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EE4B, 'M', 'ل'), - (0x1EE4C, 'X'), - (0x1EE4D, 'M', 'ن'), - (0x1EE4E, 'M', 'س'), - (0x1EE4F, 'M', 'ع'), - (0x1EE50, 'X'), - (0x1EE51, 'M', 'ص'), - (0x1EE52, 'M', 'ق'), - (0x1EE53, 'X'), - (0x1EE54, 'M', 'ش'), - (0x1EE55, 'X'), - (0x1EE57, 'M', 'خ'), - (0x1EE58, 'X'), - (0x1EE59, 'M', 'ض'), - (0x1EE5A, 'X'), - (0x1EE5B, 'M', 'غ'), - (0x1EE5C, 'X'), - (0x1EE5D, 'M', 'ں'), - (0x1EE5E, 'X'), - (0x1EE5F, 'M', 'ٯ'), - (0x1EE60, 'X'), - (0x1EE61, 'M', 'ب'), - (0x1EE62, 'M', 'ج'), - (0x1EE63, 'X'), - (0x1EE64, 'M', 'ه'), - (0x1EE65, 'X'), - (0x1EE67, 'M', 'ح'), - (0x1EE68, 'M', 'ط'), - (0x1EE69, 'M', 'ي'), - (0x1EE6A, 'M', 'ك'), - (0x1EE6B, 'X'), - (0x1EE6C, 'M', 'م'), - (0x1EE6D, 'M', 'ن'), - (0x1EE6E, 'M', 'س'), - (0x1EE6F, 'M', 'ع'), - (0x1EE70, 'M', 
'ف'), - (0x1EE71, 'M', 'ص'), - (0x1EE72, 'M', 'ق'), - (0x1EE73, 'X'), - (0x1EE74, 'M', 'ش'), - (0x1EE75, 'M', 'ت'), - (0x1EE76, 'M', 'ث'), - (0x1EE77, 'M', 'خ'), - (0x1EE78, 'X'), - (0x1EE79, 'M', 'ض'), - (0x1EE7A, 'M', 'ظ'), - (0x1EE7B, 'M', 'غ'), - (0x1EE7C, 'M', 'ٮ'), - (0x1EE7D, 'X'), - (0x1EE7E, 'M', 'ڡ'), - (0x1EE7F, 'X'), - (0x1EE80, 'M', 'ا'), - (0x1EE81, 'M', 'ب'), - (0x1EE82, 'M', 'ج'), - (0x1EE83, 'M', 'د'), - (0x1EE84, 'M', 'ه'), - (0x1EE85, 'M', 'و'), - (0x1EE86, 'M', 'ز'), - (0x1EE87, 'M', 'ح'), - (0x1EE88, 'M', 'ط'), - (0x1EE89, 'M', 'ي'), - (0x1EE8A, 'X'), - (0x1EE8B, 'M', 'ل'), - (0x1EE8C, 'M', 'م'), - (0x1EE8D, 'M', 'ن'), - (0x1EE8E, 'M', 'س'), - (0x1EE8F, 'M', 'ع'), - (0x1EE90, 'M', 'ف'), - (0x1EE91, 'M', 'ص'), - (0x1EE92, 'M', 'ق'), - (0x1EE93, 'M', 'ر'), - (0x1EE94, 'M', 'ش'), - (0x1EE95, 'M', 'ت'), - (0x1EE96, 'M', 'ث'), - (0x1EE97, 'M', 'خ'), - (0x1EE98, 'M', 'ذ'), - (0x1EE99, 'M', 'ض'), - (0x1EE9A, 'M', 'ظ'), - (0x1EE9B, 'M', 'غ'), - (0x1EE9C, 'X'), - (0x1EEA1, 'M', 'ب'), - (0x1EEA2, 'M', 'ج'), - (0x1EEA3, 'M', 'د'), - (0x1EEA4, 'X'), - (0x1EEA5, 'M', 'و'), - (0x1EEA6, 'M', 'ز'), - (0x1EEA7, 'M', 'ح'), - (0x1EEA8, 'M', 'ط'), - (0x1EEA9, 'M', 'ي'), - (0x1EEAA, 'X'), - (0x1EEAB, 'M', 'ل'), - (0x1EEAC, 'M', 'م'), - (0x1EEAD, 'M', 'ن'), - (0x1EEAE, 'M', 'س'), - (0x1EEAF, 'M', 'ع'), - (0x1EEB0, 'M', 'ف'), - (0x1EEB1, 'M', 'ص'), - (0x1EEB2, 'M', 'ق'), - (0x1EEB3, 'M', 'ر'), - (0x1EEB4, 'M', 'ش'), - ] - -def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1EEB5, 'M', 'ت'), - (0x1EEB6, 'M', 'ث'), - (0x1EEB7, 'M', 'خ'), - (0x1EEB8, 'M', 'ذ'), - (0x1EEB9, 'M', 'ض'), - (0x1EEBA, 'M', 'ظ'), - (0x1EEBB, 'M', 'غ'), - (0x1EEBC, 'X'), - (0x1EEF0, 'V'), - (0x1EEF2, 'X'), - (0x1F000, 'V'), - (0x1F02C, 'X'), - (0x1F030, 'V'), - (0x1F094, 'X'), - (0x1F0A0, 'V'), - (0x1F0AF, 'X'), - (0x1F0B1, 'V'), - (0x1F0C0, 'X'), - (0x1F0C1, 'V'), - (0x1F0D0, 'X'), - (0x1F0D1, 'V'), - (0x1F0F6, 'X'), - (0x1F101, '3', '0,'), - (0x1F102, '3', '1,'), - (0x1F103, '3', '2,'), - (0x1F104, '3', '3,'), - (0x1F105, '3', '4,'), - (0x1F106, '3', '5,'), - (0x1F107, '3', '6,'), - (0x1F108, '3', '7,'), - (0x1F109, '3', '8,'), - (0x1F10A, '3', '9,'), - (0x1F10B, 'V'), - (0x1F110, '3', '(a)'), - (0x1F111, '3', '(b)'), - (0x1F112, '3', '(c)'), - (0x1F113, '3', '(d)'), - (0x1F114, '3', '(e)'), - (0x1F115, '3', '(f)'), - (0x1F116, '3', '(g)'), - (0x1F117, '3', '(h)'), - (0x1F118, '3', '(i)'), - (0x1F119, '3', '(j)'), - (0x1F11A, '3', '(k)'), - (0x1F11B, '3', '(l)'), - (0x1F11C, '3', '(m)'), - (0x1F11D, '3', '(n)'), - (0x1F11E, '3', '(o)'), - (0x1F11F, '3', '(p)'), - (0x1F120, '3', '(q)'), - (0x1F121, '3', '(r)'), - (0x1F122, '3', '(s)'), - (0x1F123, '3', '(t)'), - (0x1F124, '3', '(u)'), - (0x1F125, '3', '(v)'), - (0x1F126, '3', '(w)'), - (0x1F127, '3', '(x)'), - (0x1F128, '3', '(y)'), - (0x1F129, '3', '(z)'), - (0x1F12A, 'M', '〔s〕'), - (0x1F12B, 'M', 'c'), - (0x1F12C, 'M', 'r'), - (0x1F12D, 'M', 'cd'), - (0x1F12E, 'M', 'wz'), - (0x1F12F, 'V'), - (0x1F130, 'M', 'a'), - (0x1F131, 'M', 'b'), - (0x1F132, 'M', 'c'), - (0x1F133, 'M', 'd'), - (0x1F134, 'M', 'e'), - (0x1F135, 'M', 'f'), - (0x1F136, 'M', 'g'), - (0x1F137, 'M', 'h'), - (0x1F138, 'M', 'i'), - (0x1F139, 'M', 'j'), - (0x1F13A, 'M', 'k'), - (0x1F13B, 'M', 'l'), - (0x1F13C, 'M', 'm'), - (0x1F13D, 'M', 'n'), - (0x1F13E, 'M', 'o'), - (0x1F13F, 'M', 'p'), - (0x1F140, 'M', 'q'), - (0x1F141, 'M', 'r'), - (0x1F142, 'M', 's'), - (0x1F143, 'M', 't'), - (0x1F144, 'M', 'u'), - (0x1F145, 'M', 'v'), - (0x1F146, 'M', 'w'), - (0x1F147, 'M', 'x'), - 
(0x1F148, 'M', 'y'), - (0x1F149, 'M', 'z'), - (0x1F14A, 'M', 'hv'), - (0x1F14B, 'M', 'mv'), - (0x1F14C, 'M', 'sd'), - (0x1F14D, 'M', 'ss'), - (0x1F14E, 'M', 'ppv'), - (0x1F14F, 'M', 'wc'), - (0x1F150, 'V'), - (0x1F16A, 'M', 'mc'), - (0x1F16B, 'M', 'md'), - ] - -def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1F16C, 'M', 'mr'), - (0x1F16D, 'V'), - (0x1F190, 'M', 'dj'), - (0x1F191, 'V'), - (0x1F1AE, 'X'), - (0x1F1E6, 'V'), - (0x1F200, 'M', 'ほか'), - (0x1F201, 'M', 'ココ'), - (0x1F202, 'M', 'サ'), - (0x1F203, 'X'), - (0x1F210, 'M', '手'), - (0x1F211, 'M', '字'), - (0x1F212, 'M', '双'), - (0x1F213, 'M', 'デ'), - (0x1F214, 'M', '二'), - (0x1F215, 'M', '多'), - (0x1F216, 'M', '解'), - (0x1F217, 'M', '天'), - (0x1F218, 'M', '交'), - (0x1F219, 'M', '映'), - (0x1F21A, 'M', '無'), - (0x1F21B, 'M', '料'), - (0x1F21C, 'M', '前'), - (0x1F21D, 'M', '後'), - (0x1F21E, 'M', '再'), - (0x1F21F, 'M', '新'), - (0x1F220, 'M', '初'), - (0x1F221, 'M', '終'), - (0x1F222, 'M', '生'), - (0x1F223, 'M', '販'), - (0x1F224, 'M', '声'), - (0x1F225, 'M', '吹'), - (0x1F226, 'M', '演'), - (0x1F227, 'M', '投'), - (0x1F228, 'M', '捕'), - (0x1F229, 'M', '一'), - (0x1F22A, 'M', '三'), - (0x1F22B, 'M', '遊'), - (0x1F22C, 'M', '左'), - (0x1F22D, 'M', '中'), - (0x1F22E, 'M', '右'), - (0x1F22F, 'M', '指'), - (0x1F230, 'M', '走'), - (0x1F231, 'M', '打'), - (0x1F232, 'M', '禁'), - (0x1F233, 'M', '空'), - (0x1F234, 'M', '合'), - (0x1F235, 'M', '満'), - (0x1F236, 'M', '有'), - (0x1F237, 'M', '月'), - (0x1F238, 'M', '申'), - (0x1F239, 'M', '割'), - (0x1F23A, 'M', '営'), - (0x1F23B, 'M', '配'), - (0x1F23C, 'X'), - (0x1F240, 'M', '〔本〕'), - (0x1F241, 'M', '〔三〕'), - (0x1F242, 'M', '〔二〕'), - (0x1F243, 'M', '〔安〕'), - (0x1F244, 'M', '〔点〕'), - (0x1F245, 'M', '〔打〕'), - (0x1F246, 'M', '〔盗〕'), - (0x1F247, 'M', '〔勝〕'), - (0x1F248, 'M', '〔敗〕'), - (0x1F249, 'X'), - (0x1F250, 'M', '得'), - (0x1F251, 'M', '可'), - (0x1F252, 'X'), - (0x1F260, 'V'), - (0x1F266, 'X'), - (0x1F300, 'V'), - (0x1F6D8, 'X'), - (0x1F6DC, 'V'), - (0x1F6ED, 'X'), - (0x1F6F0, 'V'), - (0x1F6FD, 'X'), - (0x1F700, 'V'), - (0x1F777, 'X'), - (0x1F77B, 'V'), - (0x1F7DA, 'X'), - (0x1F7E0, 'V'), - (0x1F7EC, 'X'), - (0x1F7F0, 'V'), - (0x1F7F1, 'X'), - (0x1F800, 'V'), - (0x1F80C, 'X'), - (0x1F810, 'V'), - (0x1F848, 'X'), - (0x1F850, 'V'), - (0x1F85A, 'X'), - (0x1F860, 'V'), - (0x1F888, 'X'), - (0x1F890, 'V'), - (0x1F8AE, 'X'), - (0x1F8B0, 'V'), - (0x1F8B2, 'X'), - (0x1F900, 'V'), - (0x1FA54, 'X'), - (0x1FA60, 'V'), - (0x1FA6E, 'X'), - ] - -def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x1FA70, 'V'), - (0x1FA7D, 'X'), - (0x1FA80, 'V'), - (0x1FA89, 'X'), - (0x1FA90, 'V'), - (0x1FABE, 'X'), - (0x1FABF, 'V'), - (0x1FAC6, 'X'), - (0x1FACE, 'V'), - (0x1FADC, 'X'), - (0x1FAE0, 'V'), - (0x1FAE9, 'X'), - (0x1FAF0, 'V'), - (0x1FAF9, 'X'), - (0x1FB00, 'V'), - (0x1FB93, 'X'), - (0x1FB94, 'V'), - (0x1FBCB, 'X'), - (0x1FBF0, 'M', '0'), - (0x1FBF1, 'M', '1'), - (0x1FBF2, 'M', '2'), - (0x1FBF3, 'M', '3'), - (0x1FBF4, 'M', '4'), - (0x1FBF5, 'M', '5'), - (0x1FBF6, 'M', '6'), - (0x1FBF7, 'M', '7'), - (0x1FBF8, 'M', '8'), - (0x1FBF9, 'M', '9'), - (0x1FBFA, 'X'), - (0x20000, 'V'), - (0x2A6E0, 'X'), - (0x2A700, 'V'), - (0x2B73A, 'X'), - (0x2B740, 'V'), - (0x2B81E, 'X'), - (0x2B820, 'V'), - (0x2CEA2, 'X'), - (0x2CEB0, 'V'), - (0x2EBE1, 'X'), - (0x2F800, 'M', '丽'), - (0x2F801, 'M', '丸'), - (0x2F802, 'M', '乁'), - (0x2F803, 'M', '𠄢'), - (0x2F804, 'M', '你'), - (0x2F805, 'M', '侮'), - (0x2F806, 'M', '侻'), - (0x2F807, 'M', '倂'), - (0x2F808, 'M', '偺'), - (0x2F809, 'M', '備'), - (0x2F80A, 'M', 
'僧'), - (0x2F80B, 'M', '像'), - (0x2F80C, 'M', '㒞'), - (0x2F80D, 'M', '𠘺'), - (0x2F80E, 'M', '免'), - (0x2F80F, 'M', '兔'), - (0x2F810, 'M', '兤'), - (0x2F811, 'M', '具'), - (0x2F812, 'M', '𠔜'), - (0x2F813, 'M', '㒹'), - (0x2F814, 'M', '內'), - (0x2F815, 'M', '再'), - (0x2F816, 'M', '𠕋'), - (0x2F817, 'M', '冗'), - (0x2F818, 'M', '冤'), - (0x2F819, 'M', '仌'), - (0x2F81A, 'M', '冬'), - (0x2F81B, 'M', '况'), - (0x2F81C, 'M', '𩇟'), - (0x2F81D, 'M', '凵'), - (0x2F81E, 'M', '刃'), - (0x2F81F, 'M', '㓟'), - (0x2F820, 'M', '刻'), - (0x2F821, 'M', '剆'), - (0x2F822, 'M', '割'), - (0x2F823, 'M', '剷'), - (0x2F824, 'M', '㔕'), - (0x2F825, 'M', '勇'), - (0x2F826, 'M', '勉'), - (0x2F827, 'M', '勤'), - (0x2F828, 'M', '勺'), - (0x2F829, 'M', '包'), - (0x2F82A, 'M', '匆'), - (0x2F82B, 'M', '北'), - (0x2F82C, 'M', '卉'), - (0x2F82D, 'M', '卑'), - (0x2F82E, 'M', '博'), - (0x2F82F, 'M', '即'), - (0x2F830, 'M', '卽'), - (0x2F831, 'M', '卿'), - (0x2F834, 'M', '𠨬'), - (0x2F835, 'M', '灰'), - (0x2F836, 'M', '及'), - (0x2F837, 'M', '叟'), - (0x2F838, 'M', '𠭣'), - (0x2F839, 'M', '叫'), - (0x2F83A, 'M', '叱'), - (0x2F83B, 'M', '吆'), - (0x2F83C, 'M', '咞'), - (0x2F83D, 'M', '吸'), - (0x2F83E, 'M', '呈'), - ] - -def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F83F, 'M', '周'), - (0x2F840, 'M', '咢'), - (0x2F841, 'M', '哶'), - (0x2F842, 'M', '唐'), - (0x2F843, 'M', '啓'), - (0x2F844, 'M', '啣'), - (0x2F845, 'M', '善'), - (0x2F847, 'M', '喙'), - (0x2F848, 'M', '喫'), - (0x2F849, 'M', '喳'), - (0x2F84A, 'M', '嗂'), - (0x2F84B, 'M', '圖'), - (0x2F84C, 'M', '嘆'), - (0x2F84D, 'M', '圗'), - (0x2F84E, 'M', '噑'), - (0x2F84F, 'M', '噴'), - (0x2F850, 'M', '切'), - (0x2F851, 'M', '壮'), - (0x2F852, 'M', '城'), - (0x2F853, 'M', '埴'), - (0x2F854, 'M', '堍'), - (0x2F855, 'M', '型'), - (0x2F856, 'M', '堲'), - (0x2F857, 'M', '報'), - (0x2F858, 'M', '墬'), - (0x2F859, 'M', '𡓤'), - (0x2F85A, 'M', '売'), - (0x2F85B, 'M', '壷'), - (0x2F85C, 'M', '夆'), - (0x2F85D, 'M', '多'), - (0x2F85E, 'M', '夢'), - (0x2F85F, 'M', '奢'), - (0x2F860, 'M', '𡚨'), - (0x2F861, 'M', '𡛪'), - (0x2F862, 'M', '姬'), - (0x2F863, 'M', '娛'), - (0x2F864, 'M', '娧'), - (0x2F865, 'M', '姘'), - (0x2F866, 'M', '婦'), - (0x2F867, 'M', '㛮'), - (0x2F868, 'X'), - (0x2F869, 'M', '嬈'), - (0x2F86A, 'M', '嬾'), - (0x2F86C, 'M', '𡧈'), - (0x2F86D, 'M', '寃'), - (0x2F86E, 'M', '寘'), - (0x2F86F, 'M', '寧'), - (0x2F870, 'M', '寳'), - (0x2F871, 'M', '𡬘'), - (0x2F872, 'M', '寿'), - (0x2F873, 'M', '将'), - (0x2F874, 'X'), - (0x2F875, 'M', '尢'), - (0x2F876, 'M', '㞁'), - (0x2F877, 'M', '屠'), - (0x2F878, 'M', '屮'), - (0x2F879, 'M', '峀'), - (0x2F87A, 'M', '岍'), - (0x2F87B, 'M', '𡷤'), - (0x2F87C, 'M', '嵃'), - (0x2F87D, 'M', '𡷦'), - (0x2F87E, 'M', '嵮'), - (0x2F87F, 'M', '嵫'), - (0x2F880, 'M', '嵼'), - (0x2F881, 'M', '巡'), - (0x2F882, 'M', '巢'), - (0x2F883, 'M', '㠯'), - (0x2F884, 'M', '巽'), - (0x2F885, 'M', '帨'), - (0x2F886, 'M', '帽'), - (0x2F887, 'M', '幩'), - (0x2F888, 'M', '㡢'), - (0x2F889, 'M', '𢆃'), - (0x2F88A, 'M', '㡼'), - (0x2F88B, 'M', '庰'), - (0x2F88C, 'M', '庳'), - (0x2F88D, 'M', '庶'), - (0x2F88E, 'M', '廊'), - (0x2F88F, 'M', '𪎒'), - (0x2F890, 'M', '廾'), - (0x2F891, 'M', '𢌱'), - (0x2F893, 'M', '舁'), - (0x2F894, 'M', '弢'), - (0x2F896, 'M', '㣇'), - (0x2F897, 'M', '𣊸'), - (0x2F898, 'M', '𦇚'), - (0x2F899, 'M', '形'), - (0x2F89A, 'M', '彫'), - (0x2F89B, 'M', '㣣'), - (0x2F89C, 'M', '徚'), - (0x2F89D, 'M', '忍'), - (0x2F89E, 'M', '志'), - (0x2F89F, 'M', '忹'), - (0x2F8A0, 'M', '悁'), - (0x2F8A1, 'M', '㤺'), - (0x2F8A2, 'M', '㤜'), - (0x2F8A3, 'M', '悔'), - (0x2F8A4, 'M', '𢛔'), - (0x2F8A5, 'M', '惇'), - (0x2F8A6, 'M', '慈'), - ] - -def _seg_78() 
-> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F8A7, 'M', '慌'), - (0x2F8A8, 'M', '慎'), - (0x2F8A9, 'M', '慌'), - (0x2F8AA, 'M', '慺'), - (0x2F8AB, 'M', '憎'), - (0x2F8AC, 'M', '憲'), - (0x2F8AD, 'M', '憤'), - (0x2F8AE, 'M', '憯'), - (0x2F8AF, 'M', '懞'), - (0x2F8B0, 'M', '懲'), - (0x2F8B1, 'M', '懶'), - (0x2F8B2, 'M', '成'), - (0x2F8B3, 'M', '戛'), - (0x2F8B4, 'M', '扝'), - (0x2F8B5, 'M', '抱'), - (0x2F8B6, 'M', '拔'), - (0x2F8B7, 'M', '捐'), - (0x2F8B8, 'M', '𢬌'), - (0x2F8B9, 'M', '挽'), - (0x2F8BA, 'M', '拼'), - (0x2F8BB, 'M', '捨'), - (0x2F8BC, 'M', '掃'), - (0x2F8BD, 'M', '揤'), - (0x2F8BE, 'M', '𢯱'), - (0x2F8BF, 'M', '搢'), - (0x2F8C0, 'M', '揅'), - (0x2F8C1, 'M', '掩'), - (0x2F8C2, 'M', '㨮'), - (0x2F8C3, 'M', '摩'), - (0x2F8C4, 'M', '摾'), - (0x2F8C5, 'M', '撝'), - (0x2F8C6, 'M', '摷'), - (0x2F8C7, 'M', '㩬'), - (0x2F8C8, 'M', '敏'), - (0x2F8C9, 'M', '敬'), - (0x2F8CA, 'M', '𣀊'), - (0x2F8CB, 'M', '旣'), - (0x2F8CC, 'M', '書'), - (0x2F8CD, 'M', '晉'), - (0x2F8CE, 'M', '㬙'), - (0x2F8CF, 'M', '暑'), - (0x2F8D0, 'M', '㬈'), - (0x2F8D1, 'M', '㫤'), - (0x2F8D2, 'M', '冒'), - (0x2F8D3, 'M', '冕'), - (0x2F8D4, 'M', '最'), - (0x2F8D5, 'M', '暜'), - (0x2F8D6, 'M', '肭'), - (0x2F8D7, 'M', '䏙'), - (0x2F8D8, 'M', '朗'), - (0x2F8D9, 'M', '望'), - (0x2F8DA, 'M', '朡'), - (0x2F8DB, 'M', '杞'), - (0x2F8DC, 'M', '杓'), - (0x2F8DD, 'M', '𣏃'), - (0x2F8DE, 'M', '㭉'), - (0x2F8DF, 'M', '柺'), - (0x2F8E0, 'M', '枅'), - (0x2F8E1, 'M', '桒'), - (0x2F8E2, 'M', '梅'), - (0x2F8E3, 'M', '𣑭'), - (0x2F8E4, 'M', '梎'), - (0x2F8E5, 'M', '栟'), - (0x2F8E6, 'M', '椔'), - (0x2F8E7, 'M', '㮝'), - (0x2F8E8, 'M', '楂'), - (0x2F8E9, 'M', '榣'), - (0x2F8EA, 'M', '槪'), - (0x2F8EB, 'M', '檨'), - (0x2F8EC, 'M', '𣚣'), - (0x2F8ED, 'M', '櫛'), - (0x2F8EE, 'M', '㰘'), - (0x2F8EF, 'M', '次'), - (0x2F8F0, 'M', '𣢧'), - (0x2F8F1, 'M', '歔'), - (0x2F8F2, 'M', '㱎'), - (0x2F8F3, 'M', '歲'), - (0x2F8F4, 'M', '殟'), - (0x2F8F5, 'M', '殺'), - (0x2F8F6, 'M', '殻'), - (0x2F8F7, 'M', '𣪍'), - (0x2F8F8, 'M', '𡴋'), - (0x2F8F9, 'M', '𣫺'), - (0x2F8FA, 'M', '汎'), - (0x2F8FB, 'M', '𣲼'), - (0x2F8FC, 'M', '沿'), - (0x2F8FD, 'M', '泍'), - (0x2F8FE, 'M', '汧'), - (0x2F8FF, 'M', '洖'), - (0x2F900, 'M', '派'), - (0x2F901, 'M', '海'), - (0x2F902, 'M', '流'), - (0x2F903, 'M', '浩'), - (0x2F904, 'M', '浸'), - (0x2F905, 'M', '涅'), - (0x2F906, 'M', '𣴞'), - (0x2F907, 'M', '洴'), - (0x2F908, 'M', '港'), - (0x2F909, 'M', '湮'), - (0x2F90A, 'M', '㴳'), - ] - -def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F90B, 'M', '滋'), - (0x2F90C, 'M', '滇'), - (0x2F90D, 'M', '𣻑'), - (0x2F90E, 'M', '淹'), - (0x2F90F, 'M', '潮'), - (0x2F910, 'M', '𣽞'), - (0x2F911, 'M', '𣾎'), - (0x2F912, 'M', '濆'), - (0x2F913, 'M', '瀹'), - (0x2F914, 'M', '瀞'), - (0x2F915, 'M', '瀛'), - (0x2F916, 'M', '㶖'), - (0x2F917, 'M', '灊'), - (0x2F918, 'M', '災'), - (0x2F919, 'M', '灷'), - (0x2F91A, 'M', '炭'), - (0x2F91B, 'M', '𠔥'), - (0x2F91C, 'M', '煅'), - (0x2F91D, 'M', '𤉣'), - (0x2F91E, 'M', '熜'), - (0x2F91F, 'X'), - (0x2F920, 'M', '爨'), - (0x2F921, 'M', '爵'), - (0x2F922, 'M', '牐'), - (0x2F923, 'M', '𤘈'), - (0x2F924, 'M', '犀'), - (0x2F925, 'M', '犕'), - (0x2F926, 'M', '𤜵'), - (0x2F927, 'M', '𤠔'), - (0x2F928, 'M', '獺'), - (0x2F929, 'M', '王'), - (0x2F92A, 'M', '㺬'), - (0x2F92B, 'M', '玥'), - (0x2F92C, 'M', '㺸'), - (0x2F92E, 'M', '瑇'), - (0x2F92F, 'M', '瑜'), - (0x2F930, 'M', '瑱'), - (0x2F931, 'M', '璅'), - (0x2F932, 'M', '瓊'), - (0x2F933, 'M', '㼛'), - (0x2F934, 'M', '甤'), - (0x2F935, 'M', '𤰶'), - (0x2F936, 'M', '甾'), - (0x2F937, 'M', '𤲒'), - (0x2F938, 'M', '異'), - (0x2F939, 'M', '𢆟'), - (0x2F93A, 'M', '瘐'), - (0x2F93B, 'M', '𤾡'), - 
(0x2F93C, 'M', '𤾸'), - (0x2F93D, 'M', '𥁄'), - (0x2F93E, 'M', '㿼'), - (0x2F93F, 'M', '䀈'), - (0x2F940, 'M', '直'), - (0x2F941, 'M', '𥃳'), - (0x2F942, 'M', '𥃲'), - (0x2F943, 'M', '𥄙'), - (0x2F944, 'M', '𥄳'), - (0x2F945, 'M', '眞'), - (0x2F946, 'M', '真'), - (0x2F948, 'M', '睊'), - (0x2F949, 'M', '䀹'), - (0x2F94A, 'M', '瞋'), - (0x2F94B, 'M', '䁆'), - (0x2F94C, 'M', '䂖'), - (0x2F94D, 'M', '𥐝'), - (0x2F94E, 'M', '硎'), - (0x2F94F, 'M', '碌'), - (0x2F950, 'M', '磌'), - (0x2F951, 'M', '䃣'), - (0x2F952, 'M', '𥘦'), - (0x2F953, 'M', '祖'), - (0x2F954, 'M', '𥚚'), - (0x2F955, 'M', '𥛅'), - (0x2F956, 'M', '福'), - (0x2F957, 'M', '秫'), - (0x2F958, 'M', '䄯'), - (0x2F959, 'M', '穀'), - (0x2F95A, 'M', '穊'), - (0x2F95B, 'M', '穏'), - (0x2F95C, 'M', '𥥼'), - (0x2F95D, 'M', '𥪧'), - (0x2F95F, 'X'), - (0x2F960, 'M', '䈂'), - (0x2F961, 'M', '𥮫'), - (0x2F962, 'M', '篆'), - (0x2F963, 'M', '築'), - (0x2F964, 'M', '䈧'), - (0x2F965, 'M', '𥲀'), - (0x2F966, 'M', '糒'), - (0x2F967, 'M', '䊠'), - (0x2F968, 'M', '糨'), - (0x2F969, 'M', '糣'), - (0x2F96A, 'M', '紀'), - (0x2F96B, 'M', '𥾆'), - (0x2F96C, 'M', '絣'), - (0x2F96D, 'M', '䌁'), - (0x2F96E, 'M', '緇'), - (0x2F96F, 'M', '縂'), - (0x2F970, 'M', '繅'), - (0x2F971, 'M', '䌴'), - ] - -def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F972, 'M', '𦈨'), - (0x2F973, 'M', '𦉇'), - (0x2F974, 'M', '䍙'), - (0x2F975, 'M', '𦋙'), - (0x2F976, 'M', '罺'), - (0x2F977, 'M', '𦌾'), - (0x2F978, 'M', '羕'), - (0x2F979, 'M', '翺'), - (0x2F97A, 'M', '者'), - (0x2F97B, 'M', '𦓚'), - (0x2F97C, 'M', '𦔣'), - (0x2F97D, 'M', '聠'), - (0x2F97E, 'M', '𦖨'), - (0x2F97F, 'M', '聰'), - (0x2F980, 'M', '𣍟'), - (0x2F981, 'M', '䏕'), - (0x2F982, 'M', '育'), - (0x2F983, 'M', '脃'), - (0x2F984, 'M', '䐋'), - (0x2F985, 'M', '脾'), - (0x2F986, 'M', '媵'), - (0x2F987, 'M', '𦞧'), - (0x2F988, 'M', '𦞵'), - (0x2F989, 'M', '𣎓'), - (0x2F98A, 'M', '𣎜'), - (0x2F98B, 'M', '舁'), - (0x2F98C, 'M', '舄'), - (0x2F98D, 'M', '辞'), - (0x2F98E, 'M', '䑫'), - (0x2F98F, 'M', '芑'), - (0x2F990, 'M', '芋'), - (0x2F991, 'M', '芝'), - (0x2F992, 'M', '劳'), - (0x2F993, 'M', '花'), - (0x2F994, 'M', '芳'), - (0x2F995, 'M', '芽'), - (0x2F996, 'M', '苦'), - (0x2F997, 'M', '𦬼'), - (0x2F998, 'M', '若'), - (0x2F999, 'M', '茝'), - (0x2F99A, 'M', '荣'), - (0x2F99B, 'M', '莭'), - (0x2F99C, 'M', '茣'), - (0x2F99D, 'M', '莽'), - (0x2F99E, 'M', '菧'), - (0x2F99F, 'M', '著'), - (0x2F9A0, 'M', '荓'), - (0x2F9A1, 'M', '菊'), - (0x2F9A2, 'M', '菌'), - (0x2F9A3, 'M', '菜'), - (0x2F9A4, 'M', '𦰶'), - (0x2F9A5, 'M', '𦵫'), - (0x2F9A6, 'M', '𦳕'), - (0x2F9A7, 'M', '䔫'), - (0x2F9A8, 'M', '蓱'), - (0x2F9A9, 'M', '蓳'), - (0x2F9AA, 'M', '蔖'), - (0x2F9AB, 'M', '𧏊'), - (0x2F9AC, 'M', '蕤'), - (0x2F9AD, 'M', '𦼬'), - (0x2F9AE, 'M', '䕝'), - (0x2F9AF, 'M', '䕡'), - (0x2F9B0, 'M', '𦾱'), - (0x2F9B1, 'M', '𧃒'), - (0x2F9B2, 'M', '䕫'), - (0x2F9B3, 'M', '虐'), - (0x2F9B4, 'M', '虜'), - (0x2F9B5, 'M', '虧'), - (0x2F9B6, 'M', '虩'), - (0x2F9B7, 'M', '蚩'), - (0x2F9B8, 'M', '蚈'), - (0x2F9B9, 'M', '蜎'), - (0x2F9BA, 'M', '蛢'), - (0x2F9BB, 'M', '蝹'), - (0x2F9BC, 'M', '蜨'), - (0x2F9BD, 'M', '蝫'), - (0x2F9BE, 'M', '螆'), - (0x2F9BF, 'X'), - (0x2F9C0, 'M', '蟡'), - (0x2F9C1, 'M', '蠁'), - (0x2F9C2, 'M', '䗹'), - (0x2F9C3, 'M', '衠'), - (0x2F9C4, 'M', '衣'), - (0x2F9C5, 'M', '𧙧'), - (0x2F9C6, 'M', '裗'), - (0x2F9C7, 'M', '裞'), - (0x2F9C8, 'M', '䘵'), - (0x2F9C9, 'M', '裺'), - (0x2F9CA, 'M', '㒻'), - (0x2F9CB, 'M', '𧢮'), - (0x2F9CC, 'M', '𧥦'), - (0x2F9CD, 'M', '䚾'), - (0x2F9CE, 'M', '䛇'), - (0x2F9CF, 'M', '誠'), - (0x2F9D0, 'M', '諭'), - (0x2F9D1, 'M', '變'), - (0x2F9D2, 'M', '豕'), - (0x2F9D3, 'M', '𧲨'), - (0x2F9D4, 'M', '貫'), - 
(0x2F9D5, 'M', '賁'), - ] - -def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ - (0x2F9D6, 'M', '贛'), - (0x2F9D7, 'M', '起'), - (0x2F9D8, 'M', '𧼯'), - (0x2F9D9, 'M', '𠠄'), - (0x2F9DA, 'M', '跋'), - (0x2F9DB, 'M', '趼'), - (0x2F9DC, 'M', '跰'), - (0x2F9DD, 'M', '𠣞'), - (0x2F9DE, 'M', '軔'), - (0x2F9DF, 'M', '輸'), - (0x2F9E0, 'M', '𨗒'), - (0x2F9E1, 'M', '𨗭'), - (0x2F9E2, 'M', '邔'), - (0x2F9E3, 'M', '郱'), - (0x2F9E4, 'M', '鄑'), - (0x2F9E5, 'M', '𨜮'), - (0x2F9E6, 'M', '鄛'), - (0x2F9E7, 'M', '鈸'), - (0x2F9E8, 'M', '鋗'), - (0x2F9E9, 'M', '鋘'), - (0x2F9EA, 'M', '鉼'), - (0x2F9EB, 'M', '鏹'), - (0x2F9EC, 'M', '鐕'), - (0x2F9ED, 'M', '𨯺'), - (0x2F9EE, 'M', '開'), - (0x2F9EF, 'M', '䦕'), - (0x2F9F0, 'M', '閷'), - (0x2F9F1, 'M', '𨵷'), - (0x2F9F2, 'M', '䧦'), - (0x2F9F3, 'M', '雃'), - (0x2F9F4, 'M', '嶲'), - (0x2F9F5, 'M', '霣'), - (0x2F9F6, 'M', '𩅅'), - (0x2F9F7, 'M', '𩈚'), - (0x2F9F8, 'M', '䩮'), - (0x2F9F9, 'M', '䩶'), - (0x2F9FA, 'M', '韠'), - (0x2F9FB, 'M', '𩐊'), - (0x2F9FC, 'M', '䪲'), - (0x2F9FD, 'M', '𩒖'), - (0x2F9FE, 'M', '頋'), - (0x2FA00, 'M', '頩'), - (0x2FA01, 'M', '𩖶'), - (0x2FA02, 'M', '飢'), - (0x2FA03, 'M', '䬳'), - (0x2FA04, 'M', '餩'), - (0x2FA05, 'M', '馧'), - (0x2FA06, 'M', '駂'), - (0x2FA07, 'M', '駾'), - (0x2FA08, 'M', '䯎'), - (0x2FA09, 'M', '𩬰'), - (0x2FA0A, 'M', '鬒'), - (0x2FA0B, 'M', '鱀'), - (0x2FA0C, 'M', '鳽'), - (0x2FA0D, 'M', '䳎'), - (0x2FA0E, 'M', '䳭'), - (0x2FA0F, 'M', '鵧'), - (0x2FA10, 'M', '𪃎'), - (0x2FA11, 'M', '䳸'), - (0x2FA12, 'M', '𪄅'), - (0x2FA13, 'M', '𪈎'), - (0x2FA14, 'M', '𪊑'), - (0x2FA15, 'M', '麻'), - (0x2FA16, 'M', '䵖'), - (0x2FA17, 'M', '黹'), - (0x2FA18, 'M', '黾'), - (0x2FA19, 'M', '鼅'), - (0x2FA1A, 'M', '鼏'), - (0x2FA1B, 'M', '鼖'), - (0x2FA1C, 'M', '鼻'), - (0x2FA1D, 'M', '𪘀'), - (0x2FA1E, 'X'), - (0x30000, 'V'), - (0x3134B, 'X'), - (0x31350, 'V'), - (0x323B0, 'X'), - (0xE0100, 'I'), - (0xE01F0, 'X'), - ] - -uts46data = tuple( - _seg_0() - + _seg_1() - + _seg_2() - + _seg_3() - + _seg_4() - + _seg_5() - + _seg_6() - + _seg_7() - + _seg_8() - + _seg_9() - + _seg_10() - + _seg_11() - + _seg_12() - + _seg_13() - + _seg_14() - + _seg_15() - + _seg_16() - + _seg_17() - + _seg_18() - + _seg_19() - + _seg_20() - + _seg_21() - + _seg_22() - + _seg_23() - + _seg_24() - + _seg_25() - + _seg_26() - + _seg_27() - + _seg_28() - + _seg_29() - + _seg_30() - + _seg_31() - + _seg_32() - + _seg_33() - + _seg_34() - + _seg_35() - + _seg_36() - + _seg_37() - + _seg_38() - + _seg_39() - + _seg_40() - + _seg_41() - + _seg_42() - + _seg_43() - + _seg_44() - + _seg_45() - + _seg_46() - + _seg_47() - + _seg_48() - + _seg_49() - + _seg_50() - + _seg_51() - + _seg_52() - + _seg_53() - + _seg_54() - + _seg_55() - + _seg_56() - + _seg_57() - + _seg_58() - + _seg_59() - + _seg_60() - + _seg_61() - + _seg_62() - + _seg_63() - + _seg_64() - + _seg_65() - + _seg_66() - + _seg_67() - + _seg_68() - + _seg_69() - + _seg_70() - + _seg_71() - + _seg_72() - + _seg_73() - + _seg_74() - + _seg_75() - + _seg_76() - + _seg_77() - + _seg_78() - + _seg_79() - + _seg_80() - + _seg_81() -) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] 
diff --git a/spaces/playgrdstar/ancient-chinese-calligraphy/app.py b/spaces/playgrdstar/ancient-chinese-calligraphy/app.py deleted file mode 100644 index 6366fccce192fa5b293538405107822b28d70df3..0000000000000000000000000000000000000000 --- a/spaces/playgrdstar/ancient-chinese-calligraphy/app.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -import torch - -import PIL -from PIL import Image - -from diffusers import StableDiffusionPipeline -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - -from huggingface_hub import hf_hub_download - -pretrained_model_name_or_path = "stabilityai/stable-diffusion-2" - -repo_id_embeds = "playgrdstar/ancient-chinese-calligraphy" - -embeds_path = hf_hub_download(repo_id=repo_id_embeds, filename="learned_embeds.bin") - -print(f'Embedding path: {embeds_path}') - -learned_embeds_path = embeds_path - -tokenizer = CLIPTokenizer.from_pretrained( - pretrained_model_name_or_path, - subfolder="tokenizer", -) -text_encoder = CLIPTextModel.from_pretrained( - pretrained_model_name_or_path, subfolder="text_encoder", torch_dtype=torch.float32 -) - -print(f'Models loaded') - -def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None): - loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu") - - # separate token and the embeds - trained_token = list(loaded_learned_embeds.keys())[0] - embeds = loaded_learned_embeds[trained_token] - - # cast to dtype of text_encoder - dtype = text_encoder.get_input_embeddings().weight.dtype - embeds.to(dtype) - - # add the token in tokenizer - token = token if token is not None else trained_token - num_added_tokens = tokenizer.add_tokens(token) - if num_added_tokens == 0: - raise ValueError(f"The tokenizer already contains the token {token}. Please pass a different `token` that is not already in the tokenizer.") - - # resize the token embeddings - text_encoder.resize_token_embeddings(len(tokenizer)) - - # get the id for the token and assign the embeds - token_id = tokenizer.convert_tokens_to_ids(token) - text_encoder.get_input_embeddings().weight.data[token_id] = embeds - -load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer) - -pipe = StableDiffusionPipeline.from_pretrained( - pretrained_model_name_or_path, - torch_dtype=torch.float32, - text_encoder=text_encoder, - tokenizer=tokenizer, -) - -print(f'Pipeline loaded') - -import gradio as gr - -def generate_prompt(prompt_text): - full_prompt = f"a {prompt_text} in the style of \u003Cchinese-calligraph>, white background" - return full_prompt - -def generate_image(prompt_text): - prompt = generate_prompt(prompt_text) - images = pipe([prompt], num_inference_steps=30, guidance_scale=7.5).images - return images[0] - -with gr.Blocks(css='style.css') as demo: - gr.HTML(""" -
    - Ancient Chinese Calligraphy Generation
    - """) - - with gr.Column(elem_id="col-container"): - with gr.Row(variant="compact"): - prompt = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=2, - placeholder="prompt", - ).style( - container=False, - ) - generate = gr.Button("Generate").style(full_width=False) - - with gr.Row(): - output=gr.Image(label="",show_label=False).style(height=500, width=500) - - generate.click(generate_image, inputs=[prompt], outputs=[output]) - -demo.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/pngwn/huguru/app.py b/spaces/pngwn/huguru/app.py deleted file mode 100644 index 9f3cf9ef8a51ab97b9d7f4afb976b36c13569e47..0000000000000000000000000000000000000000 --- a/spaces/pngwn/huguru/app.py +++ /dev/null @@ -1,106 +0,0 @@ -import gradio as gr -from share_btn import community_icon_html, loading_icon_html, share_js -import random -import re - -import torch -from transformers import AutoModelWithLMHead, AutoTokenizer, pipeline, set_seed - -import gradio as grad -from diffusers import StableDiffusionPipeline - -tokenizer = AutoTokenizer.from_pretrained("shahp7575/gpt2-horoscopes") -model = AutoModelWithLMHead.from_pretrained("shahp7575/gpt2-horoscopes") - -def fn(sign, cat): - sign = "scorpio" - - prompt = f"<|category|> {cat} <|horoscope|> {sign}" - - - - prompt_encoded = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0) - - sample_outputs = model.generate( - prompt_encoded, - do_sample=True, - top_k=40, - max_length=300, - top_p=0.95, - temperature=0.95, - num_beams=4, - num_return_sequences=4, - ) - - final_out = tokenizer.decode(sample_outputs[0], skip_special_tokens=True) - starting_text = " ".join(final_out.split(" ")[4:]) - pipe = pipeline("text-generation", model="Gustavosta/MagicPrompt-Stable-Diffusion", tokenizer="gpt2") - - seed = random.randint(100, 1000000) - set_seed(seed) - response = pipe(starting_text + " " + sign + " art.", max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=1) - pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") - image = pipe(response[0]["generated_text"], num_inference_steps=5).images[0] - return [image, starting_text] - - -block = gr.Blocks(css="./css.css") - -with block: - with gr.Group(): - with gr.Box(): - with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True): - text = gr.Dropdown( - label="Star Sign", - choices=["aries", "taurus","gemini", "cancer", "leo", "virgo", "libra", "scorpio", "sagittarius", "capricorn", "aquarius", "Pisces"], - show_label=True, - max_lines=1, - placeholder="Enter your prompt", - elem_id="prompt-text-input", - ).style( - border=(True, False, True, True), - rounded=(True, False, False, True), - container=False, - ) - - text2 = gr.Dropdown( - choices=["love", "career", "wellness"], - label="Category", - show_label=True, - max_lines=1, - placeholder="Enter your prompt", - elem_id="prompt-text-input", - ).style( - border=(True, True, True, True), - rounded=(True, False, False, True), - container=False, - ) - - btn = gr.Button("Generate image").style( - margin=False, - rounded=(False, True, True, False), - full_width=False, - ) - - gallery = gr.Image( - interactive=False, - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2], height="auto") - text = gr.Textbox("Text") - - with gr.Group(elem_id="container-advanced-btns"): - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = 
gr.Button("Share to community", elem_id="share-btn") - - btn.click(fn=fn, inputs=[text, text2], outputs=[gallery, text]) - share_button.click( - None, - [], - [], - _js=share_js, - ) - - -block.queue(concurrency_count=40, max_size=20).launch(max_threads=150) diff --git a/spaces/prerna9811/Chord/portaudio/test/patest_unplug.c b/spaces/prerna9811/Chord/portaudio/test/patest_unplug.c deleted file mode 100644 index 0e4486e721e4dfd674c5882cd3d692f6669bca0d..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/test/patest_unplug.c +++ /dev/null @@ -1,243 +0,0 @@ -/** @file patest_unplug.c - @ingroup test_src - @brief Debug a crash involving unplugging a USB device. - @author Phil Burk http://www.softsynth.com -*/ -/* - * $Id$ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include -#include -#include "portaudio.h" - -#define NUM_SECONDS (8) -#define SAMPLE_RATE (44100) -#ifndef M_PI -#define M_PI (3.14159265) -#endif -#define TABLE_SIZE (200) -#define FRAMES_PER_BUFFER (64) -#define MAX_CHANNELS (8) - -typedef struct -{ - short sine[TABLE_SIZE]; - int32_t phases[MAX_CHANNELS]; - int32_t numChannels; - int32_t sampsToGo; -} -paTestData; - - -static int inputCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - int finished = 0; - (void) inputBuffer; /* Prevent "unused variable" warnings. */ - (void) outputBuffer; /* Prevent "unused variable" warnings. 
*/ - - data->sampsToGo -= framesPerBuffer; - if (data->sampsToGo <= 0) - { - data->sampsToGo = 0; - finished = 1; - } - return finished; -} - -static int outputCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - short *out = (short*)outputBuffer; - unsigned int i; - int finished = 0; - (void) inputBuffer; /* Prevent "unused variable" warnings. */ - - for( i=0; inumChannels; channelIndex++) - { - int phase = data->phases[channelIndex]; - *out++ = data->sine[phase]; - phase += channelIndex + 2; - if( phase >= TABLE_SIZE ) phase -= TABLE_SIZE; - data->phases[channelIndex] = phase; - } - } - return finished; -} - -/*******************************************************************/ -int main(int argc, char **args); -int main(int argc, char **args) -{ - PaStreamParameters inputParameters; - PaStreamParameters outputParameters; - PaStream *inputStream; - PaStream *outputStream; - const PaDeviceInfo *deviceInfo; - PaError err; - paTestData data; - int i; - int totalSamps; - int inputDevice = -1; - int outputDevice = -1; - - printf("Test unplugging a USB device.\n"); - - if( argc > 1 ) { - inputDevice = outputDevice = atoi( args[1] ); - printf("Using device number %d.\n\n", inputDevice ); - } else { - printf("Using default device.\n\n" ); - } - - memset(&data, 0, sizeof(data)); - - /* initialise sinusoidal wavetable */ - for( i=0; idefaultLowInputLatency; - inputParameters.hostApiSpecificStreamInfo = NULL; - err = Pa_OpenStream( - &inputStream, - &inputParameters, - NULL, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - 0, - inputCallback, - &data ); - if( err != paNoError ) goto error; - - outputParameters.channelCount = 2; - outputParameters.sampleFormat = paInt16; - deviceInfo = Pa_GetDeviceInfo( outputParameters.device ); - if( deviceInfo == NULL ) - { - fprintf( stderr, "No matching output device.\n" ); - goto error; - } - outputParameters.suggestedLatency = deviceInfo->defaultLowOutputLatency; - outputParameters.hostApiSpecificStreamInfo = NULL; - err = Pa_OpenStream( - &outputStream, - NULL, - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - (paClipOff | paDitherOff), - outputCallback, - &data ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( inputStream ); - if( err != paNoError ) goto error; - err = Pa_StartStream( outputStream ); - if( err != paNoError ) goto error; - - printf("When you hear sound, unplug the USB device.\n"); - do - { - Pa_Sleep(500); - printf("Frames remaining = %d\n", data.sampsToGo); - printf("Pa_IsStreamActive(inputStream) = %d\n", Pa_IsStreamActive(inputStream)); - printf("Pa_IsStreamActive(outputStream) = %d\n", Pa_IsStreamActive(outputStream)); - } while( Pa_IsStreamActive(inputStream) && Pa_IsStreamActive(outputStream) ); - - err = Pa_CloseStream( inputStream ); - if( err != paNoError ) goto error; - err = Pa_CloseStream( outputStream ); - if( err != paNoError ) goto error; - Pa_Terminate(); - return paNoError; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - fprintf( stderr, "Host Error message: %s\n", Pa_GetLastHostErrorInfo()->errorText ); - return err; -} diff --git a/spaces/productizationlabs/MyChatGPTTurbo/app.py b/spaces/productizationlabs/MyChatGPTTurbo/app.py deleted file mode 100644 index 
1e1beb59ffb184f365c0cec42a0b74f6bc97617b..0000000000000000000000000000000000000000 --- a/spaces/productizationlabs/MyChatGPTTurbo/app.py +++ /dev/null @@ -1,41 +0,0 @@ -_C='role' -_B=True -_A='content' -import os,gradio as gr,json,requests,openai -try:openai.api_key=os.environ['OPENAI_API_KEY'] -except KeyError: - error_message='System is at capacity right now.Please try again later';print(error_message) - def chatbot(input):return error_message -else:messages=[{_C:'system',_A:'My AI Assistant'}] -API_URL='https://api.openai.com/v1/chat/completions' -top_p_chatgpt=1.0 -temperature_chatgpt=1.0 -def chatbot(inputs,chat_counter_chatgpt,chatbot_chatgpt=[],history=[]): - X='delta';W='choices';V='gpt-3.5-turbo';U='frequency_penalty';T='presence_penalty';S='stream';R='top_p';Q='temperature';P='messages';O='model';J='user';F=chat_counter_chatgpt;E=inputs;A=history;K={O:V,P:[{_C:J,_A:f"{E}"}],Q:1.0,R:1.0,'n':1,S:_B,T:0,U:0};Y={'Content-Type':'application/json','Authorization':f"Bearer {openai.api_key}"} - if F!=0: - C=[] - for L in chatbot_chatgpt:G={};G[_C]=J;G[_A]=L[0];H={};H[_C]='assistant';H[_A]=L[1];C.append(G);C.append(H) - I={};I[_C]=J;I[_A]=E;C.append(I);K={O:V,P:C,Q:temperature_chatgpt,R:top_p_chatgpt,'n':1,S:_B,T:0,U:0} - F+=1;A.append('You asked: '+E);Z=requests.post(API_URL,headers=Y,json=K,stream=_B);M=0;D='';N=0 - for B in Z.iter_lines(): - if N==0:N+=1;continue - if B.decode(): - B=B.decode() - if len(B)>13 and _A in json.loads(B[6:])[W][0][X]: - D=D+json.loads(B[6:])[W][0][X][_A] - if M==0:A.append(' '+D) - else:A[-1]=D - a=[(A[B],A[B+1])for B in range(0,len(A)-1,2)];M+=1;yield(a,A,F) -def reset_textbox():return gr.update(value='') -def reset_chat(chatbot,state):return None,[] -with gr.Blocks(css='#col_container {width: 1000px; margin-left: auto; margin-right: auto;}\n #chatgpt {height: 400px; overflow: auto;}} ',theme=gr.themes.Default(primary_hue='slate'))as ChatGPTTurbo: - with gr.Row(): - with gr.Column(scale=14): - with gr.Box(): - with gr.Row(): - with gr.Column(scale=13):inputs=gr.Textbox(label='Ask me anything ⤵️ Try: Value of pi') - with gr.Column(scale=1):b1=gr.Button('Submit',elem_id='submit').style(full_width=_B);b2=gr.Button('Clear',elem_id='clear').style(full_width=_B) - state_chatgpt=gr.State([]) - with gr.Box(): - with gr.Row():chatbot_chatgpt=gr.Chatbot(elem_id='chatgpt',label='My ChatGPT Turbo') - chat_counter_chatgpt=gr.Number(value=0,visible=False,precision=0);inputs.submit(reset_textbox,[],[inputs]);b1.click(chatbot,[inputs,chat_counter_chatgpt,chatbot_chatgpt,state_chatgpt],[chatbot_chatgpt,state_chatgpt]);b2.click(reset_chat,[chatbot_chatgpt,state_chatgpt],[chatbot_chatgpt,state_chatgpt]);ChatGPTTurbo.queue(concurrency_count=16).launch(height=2500,debug=_B) \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/version.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/version.py deleted file mode 100644 index 5a4da4ff49bc80ef49e8aa7e01cc8555518bd1b1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/version.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Expose version -""" - -__version__ = "3.3.2" -VERSION = __version__.split(".") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/error.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/error.py deleted file mode 100644 index 
c51d3b8fdc45afdb7bafbeb13a951264e0228985..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/voltLib/error.py +++ /dev/null @@ -1,12 +0,0 @@ -class VoltLibError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - path, line, column = self.location - return "%s:%d:%d: %s" % (path, line, column, message) - else: - return message diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/tests/abstract/copy.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/tests/abstract/copy.py deleted file mode 100644 index 01ff09c95e7d81b3aa3a58436f795b4d47b6889a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/tests/abstract/copy.py +++ /dev/null @@ -1,543 +0,0 @@ -from hashlib import md5 -from itertools import product - -import pytest - -from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS - - -class AbstractCopyTests: - def test_copy_file_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - fs_target, - supports_empty_directories, - ): - # Copy scenario 1a - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - if not supports_empty_directories: - # Force target directory to exist by adding a dummy file - fs.touch(fs_join(target, "dummy")) - assert fs.isdir(target) - - target_file2 = fs_join(target, "file2") - target_subfile1 = fs_join(target, "subfile1") - - # Copy from source directory - fs.cp(fs_join(source, "file2"), target) - assert fs.isfile(target_file2) - - # Copy from sub directory - fs.cp(fs_join(source, "subdir", "subfile1"), target) - assert fs.isfile(target_subfile1) - - # Remove copied files - fs.rm([target_file2, target_subfile1]) - assert not fs.exists(target_file2) - assert not fs.exists(target_subfile1) - - # Repeat with trailing slash on target - fs.cp(fs_join(source, "file2"), target + "/") - assert fs.isdir(target) - assert fs.isfile(target_file2) - - fs.cp(fs_join(source, "subdir", "subfile1"), target + "/") - assert fs.isfile(target_subfile1) - - def test_copy_file_to_new_directory( - self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target - ): - # Copy scenario 1b - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - - fs.cp( - fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir/") - ) # Note trailing slash - assert fs.isdir(target) - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "subfile1")) - - def test_copy_file_to_file_in_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - fs_target, - supports_empty_directories, - ): - # Copy scenario 1c - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - if not supports_empty_directories: - # Force target directory to exist by adding a dummy file - fs.touch(fs_join(target, "dummy")) - assert fs.isdir(target) - - fs.cp(fs_join(source, "subdir", "subfile1"), fs_join(target, "newfile")) - assert fs.isfile(fs_join(target, "newfile")) - - def test_copy_file_to_file_in_new_directory( - self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target - ): - # Copy scenario 1d - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - - fs.cp( - fs_join(source, "subdir", "subfile1"), 
fs_join(target, "newdir", "newfile") - ) - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "newfile")) - - def test_copy_directory_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - fs_target, - supports_empty_directories, - ): - # Copy scenario 1e - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - if not supports_empty_directories: - # Force target directory to exist by adding a dummy file - dummy = fs_join(target, "dummy") - fs.touch(dummy) - assert fs.isdir(target) - - for source_slash, target_slash in zip([False, True], [False, True]): - s = fs_join(source, "subdir") - if source_slash: - s += "/" - t = target + "/" if target_slash else target - - # Without recursive does nothing - fs.cp(s, t) - assert fs.ls(target) == ([] if supports_empty_directories else [dummy]) - - # With recursive - fs.cp(s, t, recursive=True) - if source_slash: - assert fs.isfile(fs_join(target, "subfile1")) - assert fs.isfile(fs_join(target, "subfile2")) - assert fs.isdir(fs_join(target, "nesteddir")) - assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) - assert not fs.exists(fs_join(target, "subdir")) - - fs.rm( - [ - fs_join(target, "subfile1"), - fs_join(target, "subfile2"), - fs_join(target, "nesteddir"), - ], - recursive=True, - ) - else: - assert fs.isdir(fs_join(target, "subdir")) - assert fs.isfile(fs_join(target, "subdir", "subfile1")) - assert fs.isfile(fs_join(target, "subdir", "subfile2")) - assert fs.isdir(fs_join(target, "subdir", "nesteddir")) - assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile")) - - fs.rm(fs_join(target, "subdir"), recursive=True) - assert fs.ls(target) == ([] if supports_empty_directories else [dummy]) - - # Limit recursive by maxdepth - fs.cp(s, t, recursive=True, maxdepth=1) - if source_slash: - assert fs.isfile(fs_join(target, "subfile1")) - assert fs.isfile(fs_join(target, "subfile2")) - assert not fs.exists(fs_join(target, "nesteddir")) - assert not fs.exists(fs_join(target, "subdir")) - - fs.rm( - [ - fs_join(target, "subfile1"), - fs_join(target, "subfile2"), - ], - recursive=True, - ) - else: - assert fs.isdir(fs_join(target, "subdir")) - assert fs.isfile(fs_join(target, "subdir", "subfile1")) - assert fs.isfile(fs_join(target, "subdir", "subfile2")) - assert not fs.exists(fs_join(target, "subdir", "nesteddir")) - - fs.rm(fs_join(target, "subdir"), recursive=True) - assert fs.ls(target) == ([] if supports_empty_directories else [dummy]) - - def test_copy_directory_to_new_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - fs_target, - supports_empty_directories, - ): - # Copy scenario 1f - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - - for source_slash, target_slash in zip([False, True], [False, True]): - s = fs_join(source, "subdir") - if source_slash: - s += "/" - t = fs_join(target, "newdir") - if target_slash: - t += "/" - - # Without recursive does nothing - fs.cp(s, t) - if supports_empty_directories: - assert fs.ls(target) == [] - else: - with pytest.raises(FileNotFoundError): - fs.ls(target) - - # With recursive - fs.cp(s, t, recursive=True) - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "subfile1")) - assert fs.isfile(fs_join(target, "newdir", "subfile2")) - assert fs.isdir(fs_join(target, "newdir", "nesteddir")) - assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) - assert not fs.exists(fs_join(target, 
"subdir")) - - fs.rm(fs_join(target, "newdir"), recursive=True) - assert not fs.exists(fs_join(target, "newdir")) - - # Limit recursive by maxdepth - fs.cp(s, t, recursive=True, maxdepth=1) - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "subfile1")) - assert fs.isfile(fs_join(target, "newdir", "subfile2")) - assert not fs.exists(fs_join(target, "newdir", "nesteddir")) - assert not fs.exists(fs_join(target, "subdir")) - - fs.rm(fs_join(target, "newdir"), recursive=True) - assert not fs.exists(fs_join(target, "newdir")) - - def test_copy_glob_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - fs_target, - supports_empty_directories, - ): - # Copy scenario 1g - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - if not supports_empty_directories: - # Force target directory to exist by adding a dummy file - dummy = fs_join(target, "dummy") - fs.touch(dummy) - assert fs.isdir(target) - - for target_slash in [False, True]: - t = target + "/" if target_slash else target - - # Without recursive - fs.cp(fs_join(source, "subdir", "*"), t) - assert fs.isfile(fs_join(target, "subfile1")) - assert fs.isfile(fs_join(target, "subfile2")) - assert not fs.isdir(fs_join(target, "nesteddir")) - assert not fs.exists(fs_join(target, "nesteddir", "nestedfile")) - assert not fs.exists(fs_join(target, "subdir")) - - fs.rm( - [ - fs_join(target, "subfile1"), - fs_join(target, "subfile2"), - ], - recursive=True, - ) - assert fs.ls(target) == ([] if supports_empty_directories else [dummy]) - - # With recursive - for glob, recursive in zip(["*", "**"], [True, False]): - fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive) - assert fs.isfile(fs_join(target, "subfile1")) - assert fs.isfile(fs_join(target, "subfile2")) - assert fs.isdir(fs_join(target, "nesteddir")) - assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) - assert not fs.exists(fs_join(target, "subdir")) - - fs.rm( - [ - fs_join(target, "subfile1"), - fs_join(target, "subfile2"), - fs_join(target, "nesteddir"), - ], - recursive=True, - ) - assert fs.ls(target) == ([] if supports_empty_directories else [dummy]) - - # Limit recursive by maxdepth - fs.cp( - fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 - ) - assert fs.isfile(fs_join(target, "subfile1")) - assert fs.isfile(fs_join(target, "subfile2")) - assert not fs.exists(fs_join(target, "nesteddir")) - assert not fs.exists(fs_join(target, "subdir")) - - fs.rm( - [ - fs_join(target, "subfile1"), - fs_join(target, "subfile2"), - ], - recursive=True, - ) - assert fs.ls(target) == ([] if supports_empty_directories else [dummy]) - - def test_copy_glob_to_new_directory( - self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target - ): - # Copy scenario 1h - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - - for target_slash in [False, True]: - t = fs_join(target, "newdir") - if target_slash: - t += "/" - - # Without recursive - fs.cp(fs_join(source, "subdir", "*"), t) - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "subfile1")) - assert fs.isfile(fs_join(target, "newdir", "subfile2")) - assert not fs.exists(fs_join(target, "newdir", "nesteddir")) - assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile")) - assert not fs.exists(fs_join(target, "subdir")) - assert not fs.exists(fs_join(target, "newdir", "subdir")) - - fs.rm(fs_join(target, "newdir"), recursive=True) - assert 
not fs.exists(fs_join(target, "newdir")) - - # With recursive - for glob, recursive in zip(["*", "**"], [True, False]): - fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive) - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "subfile1")) - assert fs.isfile(fs_join(target, "newdir", "subfile2")) - assert fs.isdir(fs_join(target, "newdir", "nesteddir")) - assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) - assert not fs.exists(fs_join(target, "subdir")) - assert not fs.exists(fs_join(target, "newdir", "subdir")) - - fs.rm(fs_join(target, "newdir"), recursive=True) - assert not fs.exists(fs_join(target, "newdir")) - - # Limit recursive by maxdepth - fs.cp( - fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 - ) - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "subfile1")) - assert fs.isfile(fs_join(target, "newdir", "subfile2")) - assert not fs.exists(fs_join(target, "newdir", "nesteddir")) - assert not fs.exists(fs_join(target, "subdir")) - assert not fs.exists(fs_join(target, "newdir", "subdir")) - - fs.rm(fs_join(target, "newdir"), recursive=True) - assert not fs.exists(fs_join(target, "newdir")) - - @pytest.mark.parametrize( - GLOB_EDGE_CASES_TESTS["argnames"], - GLOB_EDGE_CASES_TESTS["argvalues"], - ) - def test_copy_glob_edge_cases( - self, - path, - recursive, - maxdepth, - expected, - fs, - fs_join, - fs_glob_edge_cases_files, - fs_target, - fs_sanitize_path, - ): - # Copy scenario 1g - source = fs_glob_edge_cases_files - - target = fs_target - - for new_dir, target_slash in product([True, False], [True, False]): - fs.mkdir(target) - - t = fs_join(target, "newdir") if new_dir else target - t = t + "/" if target_slash else t - - fs.copy(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth) - - output = fs.find(target) - if new_dir: - prefixed_expected = [ - fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected - ] - else: - prefixed_expected = [ - fs_sanitize_path(fs_join(target, p)) for p in expected - ] - assert sorted(output) == sorted(prefixed_expected) - - try: - fs.rm(target, recursive=True) - except FileNotFoundError: - pass - - def test_copy_list_of_files_to_existing_directory( - self, - fs, - fs_join, - fs_bulk_operations_scenario_0, - fs_target, - supports_empty_directories, - ): - # Copy scenario 2a - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - if not supports_empty_directories: - # Force target directory to exist by adding a dummy file - dummy = fs_join(target, "dummy") - fs.touch(dummy) - assert fs.isdir(target) - - source_files = [ - fs_join(source, "file1"), - fs_join(source, "file2"), - fs_join(source, "subdir", "subfile1"), - ] - - for target_slash in [False, True]: - t = target + "/" if target_slash else target - - fs.cp(source_files, t) - assert fs.isfile(fs_join(target, "file1")) - assert fs.isfile(fs_join(target, "file2")) - assert fs.isfile(fs_join(target, "subfile1")) - - fs.rm( - [ - fs_join(target, "file1"), - fs_join(target, "file2"), - fs_join(target, "subfile1"), - ], - recursive=True, - ) - assert fs.ls(target) == ([] if supports_empty_directories else [dummy]) - - def test_copy_list_of_files_to_new_directory( - self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target - ): - # Copy scenario 2b - source = fs_bulk_operations_scenario_0 - - target = fs_target - fs.mkdir(target) - - source_files = [ - fs_join(source, "file1"), - fs_join(source, "file2"), - 
fs_join(source, "subdir", "subfile1"), - ] - - fs.cp(source_files, fs_join(target, "newdir") + "/") # Note trailing slash - assert fs.isdir(fs_join(target, "newdir")) - assert fs.isfile(fs_join(target, "newdir", "file1")) - assert fs.isfile(fs_join(target, "newdir", "file2")) - assert fs.isfile(fs_join(target, "newdir", "subfile1")) - - def test_copy_two_files_new_directory( - self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target - ): - # This is a duplicate of test_copy_list_of_files_to_new_directory and - # can eventually be removed. - source = fs_bulk_operations_scenario_0 - - target = fs_target - assert not fs.exists(target) - fs.cp([fs_join(source, "file1"), fs_join(source, "file2")], target) - - assert fs.isdir(target) - assert fs.isfile(fs_join(target, "file1")) - assert fs.isfile(fs_join(target, "file2")) - - def test_copy_directory_without_files_with_same_name_prefix( - self, - fs, - fs_join, - fs_target, - fs_dir_and_file_with_same_name_prefix, - supports_empty_directories, - ): - # Create the test dirs - source = fs_dir_and_file_with_same_name_prefix - target = fs_target - - # Test without glob - fs.cp(fs_join(source, "subdir"), target, recursive=True) - - assert fs.isfile(fs_join(target, "subfile.txt")) - assert not fs.isfile(fs_join(target, "subdir.txt")) - - fs.rm([fs_join(target, "subfile.txt")]) - if supports_empty_directories: - assert fs.ls(target) == [] - else: - assert not fs.exists(target) - - # Test with glob - fs.cp(fs_join(source, "subdir*"), target, recursive=True) - - assert fs.isdir(fs_join(target, "subdir")) - assert fs.isfile(fs_join(target, "subdir", "subfile.txt")) - assert fs.isfile(fs_join(target, "subdir.txt")) - - def test_copy_with_source_and_destination_as_list( - self, fs, fs_target, fs_join, fs_10_files_with_hashed_names - ): - # Create the test dir - source = fs_10_files_with_hashed_names - target = fs_target - - # Create list of files for source and destination - source_files = [] - destination_files = [] - for i in range(10): - hashed_i = md5(str(i).encode("utf-8")).hexdigest() - source_files.append(fs_join(source, f"{hashed_i}.txt")) - destination_files.append(fs_join(target, f"{hashed_i}.txt")) - - # Copy and assert order was kept - fs.copy(path1=source_files, path2=destination_files) - - for i in range(10): - file_content = fs.cat(destination_files[i]).decode("utf-8") - assert file_content == str(i) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_async/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_async/__init__.py deleted file mode 100644 index 88dc7f01e132933728cbcf45c88ce82e85ddf65f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_async/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from .connection import AsyncHTTPConnection -from .connection_pool import AsyncConnectionPool -from .http11 import AsyncHTTP11Connection -from .http_proxy import AsyncHTTPProxy -from .interfaces import AsyncConnectionInterface - -try: - from .http2 import AsyncHTTP2Connection -except ImportError: # pragma: nocover - - class AsyncHTTP2Connection: # type: ignore - def __init__(self, *args, **kwargs) -> None: # type: ignore - raise RuntimeError( - "Attempted to use http2 support, but the `h2` package is not " - "installed. Use 'pip install httpcore[http2]'." 
- ) - - -try: - from .socks_proxy import AsyncSOCKSProxy -except ImportError: # pragma: nocover - - class AsyncSOCKSProxy: # type: ignore - def __init__(self, *args, **kwargs) -> None: # type: ignore - raise RuntimeError( - "Attempted to use SOCKS support, but the `socksio` package is not " - "installed. Use 'pip install httpcore[socks]'." - ) - - -__all__ = [ - "AsyncHTTPConnection", - "AsyncConnectionPool", - "AsyncHTTPProxy", - "AsyncHTTP11Connection", - "AsyncHTTP2Connection", - "AsyncConnectionInterface", - "AsyncSOCKSProxy", -] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_integrity.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_integrity.py deleted file mode 100644 index 45dd484eff4c61542f9985a52fc1db4dce8c020b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_integrity.py +++ /dev/null @@ -1,281 +0,0 @@ -import re - -import numpy as np -import pytest - -from pandas._libs import index as libindex - -from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike - -import pandas as pd -from pandas import ( - Index, - IntervalIndex, - MultiIndex, - RangeIndex, -) -import pandas._testing as tm - - -def test_labels_dtypes(): - # GH 8456 - i = MultiIndex.from_tuples([("A", 1), ("A", 2)]) - assert i.codes[0].dtype == "int8" - assert i.codes[1].dtype == "int8" - - i = MultiIndex.from_product([["a"], range(40)]) - assert i.codes[1].dtype == "int8" - i = MultiIndex.from_product([["a"], range(400)]) - assert i.codes[1].dtype == "int16" - i = MultiIndex.from_product([["a"], range(40000)]) - assert i.codes[1].dtype == "int32" - - i = MultiIndex.from_product([["a"], range(1000)]) - assert (i.codes[0] >= 0).all() - assert (i.codes[1] >= 0).all() - - -def test_values_boxed(): - tuples = [ - (1, pd.Timestamp("2000-01-01")), - (2, pd.NaT), - (3, pd.Timestamp("2000-01-03")), - (1, pd.Timestamp("2000-01-04")), - (2, pd.Timestamp("2000-01-02")), - (3, pd.Timestamp("2000-01-03")), - ] - result = MultiIndex.from_tuples(tuples) - expected = construct_1d_object_array_from_listlike(tuples) - tm.assert_numpy_array_equal(result.values, expected) - # Check that code branches for boxed values produce identical results - tm.assert_numpy_array_equal(result.values[:4], result[:4].values) - - -def test_values_multiindex_datetimeindex(): - # Test to ensure we hit the boxing / nobox part of MI.values - ints = np.arange(10**18, 10**18 + 5) - naive = pd.DatetimeIndex(ints) - - aware = pd.DatetimeIndex(ints, tz="US/Central") - - idx = MultiIndex.from_arrays([naive, aware]) - result = idx.values - - outer = pd.DatetimeIndex([x[0] for x in result]) - tm.assert_index_equal(outer, naive) - - inner = pd.DatetimeIndex([x[1] for x in result]) - tm.assert_index_equal(inner, aware) - - # n_lev > n_lab - result = idx[:2].values - - outer = pd.DatetimeIndex([x[0] for x in result]) - tm.assert_index_equal(outer, naive[:2]) - - inner = pd.DatetimeIndex([x[1] for x in result]) - tm.assert_index_equal(inner, aware[:2]) - - -def test_values_multiindex_periodindex(): - # Test to ensure we hit the boxing / nobox part of MI.values - ints = np.arange(2007, 2012) - pidx = pd.PeriodIndex(ints, freq="D") - - idx = MultiIndex.from_arrays([ints, pidx]) - result = idx.values - - outer = Index([x[0] for x in result]) - tm.assert_index_equal(outer, Index(ints, dtype=np.int64)) - - inner = pd.PeriodIndex([x[1] for x in result]) - 
tm.assert_index_equal(inner, pidx) - - # n_lev > n_lab - result = idx[:2].values - - outer = Index([x[0] for x in result]) - tm.assert_index_equal(outer, Index(ints[:2], dtype=np.int64)) - - inner = pd.PeriodIndex([x[1] for x in result]) - tm.assert_index_equal(inner, pidx[:2]) - - -def test_consistency(): - # need to construct an overflow - major_axis = list(range(70000)) - minor_axis = list(range(10)) - - major_codes = np.arange(70000) - minor_codes = np.repeat(range(10), 7000) - - # the fact that is works means it's consistent - index = MultiIndex( - levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] - ) - - # inconsistent - major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3]) - minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1]) - index = MultiIndex( - levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] - ) - - assert index.is_unique is False - - -@pytest.mark.slow -def test_hash_collisions(): - # non-smoke test that we don't get hash collisions - - index = MultiIndex.from_product( - [np.arange(1000), np.arange(1000)], names=["one", "two"] - ) - result = index.get_indexer(index.values) - tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp")) - - for i in [0, 1, len(index) - 2, len(index) - 1]: - result = index.get_loc(index[i]) - assert result == i - - -def test_dims(): - pass - - -def test_take_invalid_kwargs(): - vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]] - idx = MultiIndex.from_product(vals, names=["str", "dt"]) - indices = [1, 2] - - msg = r"take\(\) got an unexpected keyword argument 'foo'" - with pytest.raises(TypeError, match=msg): - idx.take(indices, foo=2) - - msg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=msg): - idx.take(indices, out=indices) - - msg = "the 'mode' parameter is not supported" - with pytest.raises(ValueError, match=msg): - idx.take(indices, mode="clip") - - -def test_isna_behavior(idx): - # should not segfault GH5123 - # NOTE: if MI representation changes, may make sense to allow - # isna(MI) - msg = "isna is not defined for MultiIndex" - with pytest.raises(NotImplementedError, match=msg): - pd.isna(idx) - - -def test_large_multiindex_error(): - # GH12527 - df_below_1000000 = pd.DataFrame( - 1, index=MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"] - ) - with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): - df_below_1000000.loc[(-1, 0), "dest"] - with pytest.raises(KeyError, match=r"^\(3, 0\)$"): - df_below_1000000.loc[(3, 0), "dest"] - df_above_1000000 = pd.DataFrame( - 1, index=MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"] - ) - with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): - df_above_1000000.loc[(-1, 0), "dest"] - with pytest.raises(KeyError, match=r"^\(3, 0\)$"): - df_above_1000000.loc[(3, 0), "dest"] - - -def test_mi_hashtable_populated_attribute_error(monkeypatch): - # GH 18165 - monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 50) - r = range(50) - df = pd.DataFrame({"a": r, "b": r}, index=MultiIndex.from_arrays([r, r])) - - msg = "'Series' object has no attribute 'foo'" - with pytest.raises(AttributeError, match=msg): - df["a"].foo() - - -def test_can_hold_identifiers(idx): - key = idx[0] - assert idx._can_hold_identifiers_and_holds_name(key) is True - - -def test_metadata_immutable(idx): - levels, codes = idx.levels, idx.codes - # shouldn't be able to set at either the top level or base level - mutable_regex = re.compile("does not support mutable operations") - with pytest.raises(TypeError, 
match=mutable_regex): - levels[0] = levels[0] - with pytest.raises(TypeError, match=mutable_regex): - levels[0][0] = levels[0][0] - # ditto for labels - with pytest.raises(TypeError, match=mutable_regex): - codes[0] = codes[0] - with pytest.raises(ValueError, match="assignment destination is read-only"): - codes[0][0] = codes[0][0] - # and for names - names = idx.names - with pytest.raises(TypeError, match=mutable_regex): - names[0] = names[0] - - -def test_level_setting_resets_attributes(): - ind = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) - assert ind.is_monotonic_increasing - ind = ind.set_levels([["A", "B"], [1, 3, 2]]) - # if this fails, probably didn't reset the cache correctly. - assert not ind.is_monotonic_increasing - - -def test_rangeindex_fallback_coercion_bug(): - # GH 12893 - df1 = pd.DataFrame(np.arange(100).reshape((10, 10))) - df2 = pd.DataFrame(np.arange(100).reshape((10, 10))) - df = pd.concat( - {"df1": df1.stack(future_stack=True), "df2": df2.stack(future_stack=True)}, - axis=1, - ) - df.index.names = ["fizz", "buzz"] - - str(df) - expected = pd.DataFrame( - {"df2": np.arange(100), "df1": np.arange(100)}, - index=MultiIndex.from_product([range(10), range(10)], names=["fizz", "buzz"]), - ) - tm.assert_frame_equal(df, expected, check_like=True) - - result = df.index.get_level_values("fizz") - expected = Index(np.arange(10, dtype=np.int64), name="fizz").repeat(10) - tm.assert_index_equal(result, expected) - - result = df.index.get_level_values("buzz") - expected = Index(np.tile(np.arange(10, dtype=np.int64), 10), name="buzz") - tm.assert_index_equal(result, expected) - - -def test_memory_usage(idx): - result = idx.memory_usage() - if len(idx): - idx.get_loc(idx[0]) - result2 = idx.memory_usage() - result3 = idx.memory_usage(deep=True) - - # RangeIndex, IntervalIndex - # don't have engines - if not isinstance(idx, (RangeIndex, IntervalIndex)): - assert result2 > result - - if idx.inferred_type == "object": - assert result3 > result2 - - else: - # we report 0 for no-length - assert result == 0 - - -def test_nlevels(idx): - assert idx.nlevels == 2 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/test_parse_iso8601.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/test_parse_iso8601.py deleted file mode 100644 index 1992faae2ea6a687f8bd74b4e1e10ba53bb9e901..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tslibs/test_parse_iso8601.py +++ /dev/null @@ -1,119 +0,0 @@ -from datetime import datetime - -import pytest - -from pandas._libs import tslib - -from pandas import Timestamp - - -@pytest.mark.parametrize( - "date_str, exp", - [ - ("2011-01-02", datetime(2011, 1, 2)), - ("2011-1-2", datetime(2011, 1, 2)), - ("2011-01", datetime(2011, 1, 1)), - ("2011-1", datetime(2011, 1, 1)), - ("2011 01 02", datetime(2011, 1, 2)), - ("2011.01.02", datetime(2011, 1, 2)), - ("2011/01/02", datetime(2011, 1, 2)), - ("2011\\01\\02", datetime(2011, 1, 2)), - ("2013-01-01 05:30:00", datetime(2013, 1, 1, 5, 30)), - ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30)), - ("2013-1-1 5:30:00+01:00", Timestamp(2013, 1, 1, 5, 30, tz="UTC+01:00")), - ], -) -def test_parsers_iso8601(date_str, exp): - # see gh-12060 - # - # Test only the ISO parser - flexibility to - # different separators and leading zero's. 
- actual = tslib._test_parse_iso8601(date_str) - assert actual == exp - - -@pytest.mark.parametrize( - "date_str", - [ - "2011-01/02", - "2011=11=11", - "201401", - "201111", - "200101", - # Mixed separated and unseparated. - "2005-0101", - "200501-01", - "20010101 12:3456", - "20010101 1234:56", - # HHMMSS must have two digits in - # each component if unseparated. - "20010101 1", - "20010101 123", - "20010101 12345", - "20010101 12345Z", - ], -) -def test_parsers_iso8601_invalid(date_str): - msg = f'Error parsing datetime string "{date_str}"' - - with pytest.raises(ValueError, match=msg): - tslib._test_parse_iso8601(date_str) - - -def test_parsers_iso8601_invalid_offset_invalid(): - date_str = "2001-01-01 12-34-56" - msg = f'Timezone hours offset out of range in datetime string "{date_str}"' - - with pytest.raises(ValueError, match=msg): - tslib._test_parse_iso8601(date_str) - - -def test_parsers_iso8601_leading_space(): - # GH#25895 make sure isoparser doesn't overflow with long input - date_str, expected = ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30)) - actual = tslib._test_parse_iso8601(" " * 200 + date_str) - assert actual == expected - - -@pytest.mark.parametrize( - "date_str, timespec, exp", - [ - ("2023-01-01 00:00:00", "auto", "2023-01-01T00:00:00"), - ("2023-01-01 00:00:00", "seconds", "2023-01-01T00:00:00"), - ("2023-01-01 00:00:00", "milliseconds", "2023-01-01T00:00:00.000"), - ("2023-01-01 00:00:00", "microseconds", "2023-01-01T00:00:00.000000"), - ("2023-01-01 00:00:00", "nanoseconds", "2023-01-01T00:00:00.000000000"), - ("2023-01-01 00:00:00.001", "auto", "2023-01-01T00:00:00.001000"), - ("2023-01-01 00:00:00.001", "seconds", "2023-01-01T00:00:00"), - ("2023-01-01 00:00:00.001", "milliseconds", "2023-01-01T00:00:00.001"), - ("2023-01-01 00:00:00.001", "microseconds", "2023-01-01T00:00:00.001000"), - ("2023-01-01 00:00:00.001", "nanoseconds", "2023-01-01T00:00:00.001000000"), - ("2023-01-01 00:00:00.000001", "auto", "2023-01-01T00:00:00.000001"), - ("2023-01-01 00:00:00.000001", "seconds", "2023-01-01T00:00:00"), - ("2023-01-01 00:00:00.000001", "milliseconds", "2023-01-01T00:00:00.000"), - ("2023-01-01 00:00:00.000001", "microseconds", "2023-01-01T00:00:00.000001"), - ("2023-01-01 00:00:00.000001", "nanoseconds", "2023-01-01T00:00:00.000001000"), - ("2023-01-01 00:00:00.000000001", "auto", "2023-01-01T00:00:00.000000001"), - ("2023-01-01 00:00:00.000000001", "seconds", "2023-01-01T00:00:00"), - ("2023-01-01 00:00:00.000000001", "milliseconds", "2023-01-01T00:00:00.000"), - ("2023-01-01 00:00:00.000000001", "microseconds", "2023-01-01T00:00:00.000000"), - ( - "2023-01-01 00:00:00.000000001", - "nanoseconds", - "2023-01-01T00:00:00.000000001", - ), - ("2023-01-01 00:00:00.000001001", "auto", "2023-01-01T00:00:00.000001001"), - ("2023-01-01 00:00:00.000001001", "seconds", "2023-01-01T00:00:00"), - ("2023-01-01 00:00:00.000001001", "milliseconds", "2023-01-01T00:00:00.000"), - ("2023-01-01 00:00:00.000001001", "microseconds", "2023-01-01T00:00:00.000001"), - ( - "2023-01-01 00:00:00.000001001", - "nanoseconds", - "2023-01-01T00:00:00.000001001", - ), - ], -) -def test_iso8601_formatter(date_str: str, timespec: str, exp: str): - # GH#53020 - ts = Timestamp(date_str) - assert ts.isoformat(timespec=timespec) == exp diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/_internal/_generics.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/_internal/_generics.py deleted file mode 100644 index 
7c3d5f433396f0d66ffbca7903ab92df25219dd0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/_internal/_generics.py +++ /dev/null @@ -1,520 +0,0 @@ -from __future__ import annotations - -import sys -import types -import typing -from collections import ChainMap -from contextlib import contextmanager -from contextvars import ContextVar -from types import prepare_class -from typing import TYPE_CHECKING, Any, Iterator, List, Mapping, MutableMapping, Tuple, TypeVar -from weakref import WeakValueDictionary - -import typing_extensions - -from ._core_utils import get_type_ref -from ._forward_ref import PydanticRecursiveRef -from ._typing_extra import TypeVarType, typing_base -from ._utils import all_identical, is_model_class - -if sys.version_info >= (3, 10): - from typing import _UnionGenericAlias # type: ignore[attr-defined] - -if TYPE_CHECKING: - from ..main import BaseModel - -GenericTypesCacheKey = Tuple[Any, Any, Tuple[Any, ...]] - -# Note: We want to remove LimitedDict, but to do this, we'd need to improve the handling of generics caching. -# Right now, to handle recursive generics, we some types must remain cached for brief periods without references. -# By chaining the WeakValuesDict with a LimitedDict, we have a way to retain caching for all types with references, -# while also retaining a limited number of types even without references. This is generally enough to build -# specific recursive generic models without losing required items out of the cache. - -KT = TypeVar('KT') -VT = TypeVar('VT') -_LIMITED_DICT_SIZE = 100 -if TYPE_CHECKING: - - class LimitedDict(dict, MutableMapping[KT, VT]): - def __init__(self, size_limit: int = _LIMITED_DICT_SIZE): - ... - -else: - - class LimitedDict(dict): - """Limit the size/length of a dict used for caching to avoid unlimited increase in memory usage. - - Since the dict is ordered, and we always remove elements from the beginning, this is effectively a FIFO cache. - """ - - def __init__(self, size_limit: int = _LIMITED_DICT_SIZE): - self.size_limit = size_limit - super().__init__() - - def __setitem__(self, __key: Any, __value: Any) -> None: - super().__setitem__(__key, __value) - if len(self) > self.size_limit: - excess = len(self) - self.size_limit + self.size_limit // 10 - to_remove = list(self.keys())[:excess] - for key in to_remove: - del self[key] - - def __class_getitem__(cls, *args: Any) -> Any: - # to avoid errors with 3.7 - return cls - - -# weak dictionaries allow the dynamically created parametrized versions of generic models to get collected -# once they are no longer referenced by the caller. -if sys.version_info >= (3, 9): # Typing for weak dictionaries available at 3.9 - GenericTypesCache = WeakValueDictionary[GenericTypesCacheKey, 'type[BaseModel]'] -else: - GenericTypesCache = WeakValueDictionary - -if TYPE_CHECKING: - - class DeepChainMap(ChainMap[KT, VT]): # type: ignore - ... - -else: - - class DeepChainMap(ChainMap): - """Variant of ChainMap that allows direct updates to inner scopes. - - Taken from https://docs.python.org/3/library/collections.html#collections.ChainMap, - with some light modifications for this use case. 
- """ - - def clear(self) -> None: - for mapping in self.maps: - mapping.clear() - - def __setitem__(self, key: KT, value: VT) -> None: - for mapping in self.maps: - mapping[key] = value - - def __delitem__(self, key: KT) -> None: - hit = False - for mapping in self.maps: - if key in mapping: - del mapping[key] - hit = True - if not hit: - raise KeyError(key) - - -# Despite the fact that LimitedDict _seems_ no longer necessary, I'm very nervous to actually remove it -# and discover later on that we need to re-add all this infrastructure... -# _GENERIC_TYPES_CACHE = DeepChainMap(GenericTypesCache(), LimitedDict()) - -_GENERIC_TYPES_CACHE = GenericTypesCache() - - -class PydanticGenericMetadata(typing_extensions.TypedDict): - origin: type[BaseModel] | None # analogous to typing._GenericAlias.__origin__ - args: tuple[Any, ...] # analogous to typing._GenericAlias.__args__ - parameters: tuple[type[Any], ...] # analogous to typing.Generic.__parameters__ - - -def create_generic_submodel( - model_name: str, origin: type[BaseModel], args: tuple[Any, ...], params: tuple[Any, ...] -) -> type[BaseModel]: - """Dynamically create a submodel of a provided (generic) BaseModel. - - This is used when producing concrete parametrizations of generic models. This function - only *creates* the new subclass; the schema/validators/serialization must be updated to - reflect a concrete parametrization elsewhere. - - Args: - model_name: The name of the newly created model. - origin: The base class for the new model to inherit from. - args: A tuple of generic metadata arguments. - params: A tuple of generic metadata parameters. - - Returns: - The created submodel. - """ - namespace: dict[str, Any] = {'__module__': origin.__module__} - bases = (origin,) - meta, ns, kwds = prepare_class(model_name, bases) - namespace.update(ns) - created_model = meta( - model_name, - bases, - namespace, - __pydantic_generic_metadata__={ - 'origin': origin, - 'args': args, - 'parameters': params, - }, - __pydantic_reset_parent_namespace__=False, - **kwds, - ) - - model_module, called_globally = _get_caller_frame_info(depth=3) - if called_globally: # create global reference and therefore allow pickling - object_by_reference = None - reference_name = model_name - reference_module_globals = sys.modules[created_model.__module__].__dict__ - while object_by_reference is not created_model: - object_by_reference = reference_module_globals.setdefault(reference_name, created_model) - reference_name += '_' - - return created_model - - -def _get_caller_frame_info(depth: int = 2) -> tuple[str | None, bool]: - """Used inside a function to check whether it was called globally. - - Args: - depth: The depth to get the frame. - - Returns: - A tuple contains `module_nam` and `called_globally`. - - Raises: - RuntimeError: If the function is not called inside a function. - """ - try: - previous_caller_frame = sys._getframe(depth) - except ValueError as e: - raise RuntimeError('This function must be used inside another function') from e - except AttributeError: # sys module does not have _getframe function, so there's nothing we can do about it - return None, False - frame_globals = previous_caller_frame.f_globals - return frame_globals.get('__name__'), previous_caller_frame.f_locals is frame_globals - - -DictValues: type[Any] = {}.values().__class__ - - -def iter_contained_typevars(v: Any) -> Iterator[TypeVarType]: - """Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found. 
- - This is inspired as an alternative to directly accessing the `__parameters__` attribute of a GenericAlias, - since __parameters__ of (nested) generic BaseModel subclasses won't show up in that list. - """ - if isinstance(v, TypeVar): - yield v - elif is_model_class(v): - yield from v.__pydantic_generic_metadata__['parameters'] - elif isinstance(v, (DictValues, list)): - for var in v: - yield from iter_contained_typevars(var) - else: - args = get_args(v) - for arg in args: - yield from iter_contained_typevars(arg) - - -def get_args(v: Any) -> Any: - pydantic_generic_metadata: PydanticGenericMetadata | None = getattr(v, '__pydantic_generic_metadata__', None) - if pydantic_generic_metadata: - return pydantic_generic_metadata.get('args') - return typing_extensions.get_args(v) - - -def get_origin(v: Any) -> Any: - pydantic_generic_metadata: PydanticGenericMetadata | None = getattr(v, '__pydantic_generic_metadata__', None) - if pydantic_generic_metadata: - return pydantic_generic_metadata.get('origin') - return typing_extensions.get_origin(v) - - -def get_standard_typevars_map(cls: type[Any]) -> dict[TypeVarType, Any] | None: - """Package a generic type's typevars and parametrization (if present) into a dictionary compatible with the - `replace_types` function. Specifically, this works with standard typing generics and typing._GenericAlias. - """ - origin = get_origin(cls) - if origin is None: - return None - if not hasattr(origin, '__parameters__'): - return None - - # In this case, we know that cls is a _GenericAlias, and origin is the generic type - # So it is safe to access cls.__args__ and origin.__parameters__ - args: tuple[Any, ...] = cls.__args__ # type: ignore - parameters: tuple[TypeVarType, ...] = origin.__parameters__ - return dict(zip(parameters, args)) - - -def get_model_typevars_map(cls: type[BaseModel]) -> dict[TypeVarType, Any] | None: - """Package a generic BaseModel's typevars and concrete parametrization (if present) into a dictionary compatible - with the `replace_types` function. - - Since BaseModel.__class_getitem__ does not produce a typing._GenericAlias, and the BaseModel generic info is - stored in the __pydantic_generic_metadata__ attribute, we need special handling here. - """ - # TODO: This could be unified with `get_standard_typevars_map` if we stored the generic metadata - # in the __origin__, __args__, and __parameters__ attributes of the model. - generic_metadata = cls.__pydantic_generic_metadata__ - origin = generic_metadata['origin'] - args = generic_metadata['args'] - return dict(zip(iter_contained_typevars(origin), args)) - - -def replace_types(type_: Any, type_map: Mapping[Any, Any] | None) -> Any: - """Return type with all occurrences of `type_map` keys recursively replaced with their values. - - Args: - type_: The class or generic alias. - type_map: Mapping from `TypeVar` instance to concrete types. - - Returns: - A new type representing the basic structure of `type_` with all - `typevar_map` keys recursively replaced. 
- - Example: - ```py - from typing import List, Tuple, Union - - from pydantic._internal._generics import replace_types - - replace_types(Tuple[str, Union[List[str], float]], {str: int}) - #> Tuple[int, Union[List[int], float]] - ``` - """ - if not type_map: - return type_ - - type_args = get_args(type_) - origin_type = get_origin(type_) - - if origin_type is typing_extensions.Annotated: - annotated_type, *annotations = type_args - annotated = replace_types(annotated_type, type_map) - for annotation in annotations: - annotated = typing_extensions.Annotated[annotated, annotation] - return annotated - - # Having type args is a good indicator that this is a typing module - # class instantiation or a generic alias of some sort. - if type_args: - resolved_type_args = tuple(replace_types(arg, type_map) for arg in type_args) - if all_identical(type_args, resolved_type_args): - # If all arguments are the same, there is no need to modify the - # type or create a new object at all - return type_ - if ( - origin_type is not None - and isinstance(type_, typing_base) - and not isinstance(origin_type, typing_base) - and getattr(type_, '_name', None) is not None - ): - # In python < 3.9 generic aliases don't exist so any of these like `list`, - # `type` or `collections.abc.Callable` need to be translated. - # See: https://www.python.org/dev/peps/pep-0585 - origin_type = getattr(typing, type_._name) - assert origin_type is not None - # PEP-604 syntax (Ex.: list | str) is represented with a types.UnionType object that does not have __getitem__. - # We also cannot use isinstance() since we have to compare types. - if sys.version_info >= (3, 10) and origin_type is types.UnionType: - return _UnionGenericAlias(origin_type, resolved_type_args) - return origin_type[resolved_type_args] - - # We handle pydantic generic models separately as they don't have the same - # semantics as "typing" classes or generic aliases - - if not origin_type and is_model_class(type_): - parameters = type_.__pydantic_generic_metadata__['parameters'] - if not parameters: - return type_ - resolved_type_args = tuple(replace_types(t, type_map) for t in parameters) - if all_identical(parameters, resolved_type_args): - return type_ - return type_[resolved_type_args] - - # Handle special case for typehints that can have lists as arguments. - # `typing.Callable[[int, str], int]` is an example for this. - if isinstance(type_, (List, list)): - resolved_list = list(replace_types(element, type_map) for element in type_) - if all_identical(type_, resolved_list): - return type_ - return resolved_list - - # If all else fails, we try to resolve the type directly and otherwise just - # return the input with no modifications. - return type_map.get(type_, type_) - - -def has_instance_in_type(type_: Any, isinstance_target: Any) -> bool: - """Checks if the type, or any of its arbitrary nested args, satisfy - `isinstance(, isinstance_target)`. - """ - if isinstance(type_, isinstance_target): - return True - - type_args = get_args(type_) - origin_type = get_origin(type_) - - if origin_type is typing_extensions.Annotated: - annotated_type, *annotations = type_args - return has_instance_in_type(annotated_type, isinstance_target) - - # Having type args is a good indicator that this is a typing module - # class instantiation or a generic alias of some sort. - if any(has_instance_in_type(a, isinstance_target) for a in type_args): - return True - - # Handle special case for typehints that can have lists as arguments. 
- # `typing.Callable[[int, str], int]` is an example for this. - if isinstance(type_, (List, list)) and not isinstance(type_, typing_extensions.ParamSpec): - if any(has_instance_in_type(element, isinstance_target) for element in type_): - return True - - return False - - -def check_parameters_count(cls: type[BaseModel], parameters: tuple[Any, ...]) -> None: - """Check the generic model parameters count is equal. - - Args: - cls: The generic model. - parameters: A tuple of passed parameters to the generic model. - - Raises: - TypeError: If the passed parameters count is not equal to generic model parameters count. - """ - actual = len(parameters) - expected = len(cls.__pydantic_generic_metadata__['parameters']) - if actual != expected: - description = 'many' if actual > expected else 'few' - raise TypeError(f'Too {description} parameters for {cls}; actual {actual}, expected {expected}') - - -_generic_recursion_cache: ContextVar[set[str] | None] = ContextVar('_generic_recursion_cache', default=None) - - -@contextmanager -def generic_recursion_self_type( - origin: type[BaseModel], args: tuple[Any, ...] -) -> Iterator[PydanticRecursiveRef | None]: - """This contextmanager should be placed around the recursive calls used to build a generic type, - and accept as arguments the generic origin type and the type arguments being passed to it. - - If the same origin and arguments are observed twice, it implies that a self-reference placeholder - can be used while building the core schema, and will produce a schema_ref that will be valid in the - final parent schema. - """ - previously_seen_type_refs = _generic_recursion_cache.get() - if previously_seen_type_refs is None: - previously_seen_type_refs = set() - token = _generic_recursion_cache.set(previously_seen_type_refs) - else: - token = None - - try: - type_ref = get_type_ref(origin, args_override=args) - if type_ref in previously_seen_type_refs: - self_type = PydanticRecursiveRef(type_ref=type_ref) - yield self_type - else: - previously_seen_type_refs.add(type_ref) - yield None - finally: - if token: - _generic_recursion_cache.reset(token) - - -def recursively_defined_type_refs() -> set[str]: - visited = _generic_recursion_cache.get() - if not visited: - return set() # not in a generic recursion, so there are no types - - return visited.copy() # don't allow modifications - - -def get_cached_generic_type_early(parent: type[BaseModel], typevar_values: Any) -> type[BaseModel] | None: - """The use of a two-stage cache lookup approach was necessary to have the highest performance possible for - repeated calls to `__class_getitem__` on generic types (which may happen in tighter loops during runtime), - while still ensuring that certain alternative parametrizations ultimately resolve to the same type. - - As a concrete example, this approach was necessary to make Model[List[T]][int] equal to Model[List[int]]. - The approach could be modified to not use two different cache keys at different points, but the - _early_cache_key is optimized to be as quick to compute as possible (for repeated-access speed), and the - _late_cache_key is optimized to be as "correct" as possible, so that two types that will ultimately be the - same after resolving the type arguments will always produce cache hits. 
- - If we wanted to move to only using a single cache key per type, we would either need to always use the - slower/more computationally intensive logic associated with _late_cache_key, or would need to accept - that Model[List[T]][int] is a different type than Model[List[T]][int]. Because we rely on subclass relationships - during validation, I think it is worthwhile to ensure that types that are functionally equivalent are actually - equal. - """ - return _GENERIC_TYPES_CACHE.get(_early_cache_key(parent, typevar_values)) - - -def get_cached_generic_type_late( - parent: type[BaseModel], typevar_values: Any, origin: type[BaseModel], args: tuple[Any, ...] -) -> type[BaseModel] | None: - """See the docstring of `get_cached_generic_type_early` for more information about the two-stage cache lookup.""" - cached = _GENERIC_TYPES_CACHE.get(_late_cache_key(origin, args, typevar_values)) - if cached is not None: - set_cached_generic_type(parent, typevar_values, cached, origin, args) - return cached - - -def set_cached_generic_type( - parent: type[BaseModel], - typevar_values: tuple[Any, ...], - type_: type[BaseModel], - origin: type[BaseModel] | None = None, - args: tuple[Any, ...] | None = None, -) -> None: - """See the docstring of `get_cached_generic_type_early` for more information about why items are cached with - two different keys. - """ - _GENERIC_TYPES_CACHE[_early_cache_key(parent, typevar_values)] = type_ - if len(typevar_values) == 1: - _GENERIC_TYPES_CACHE[_early_cache_key(parent, typevar_values[0])] = type_ - if origin and args: - _GENERIC_TYPES_CACHE[_late_cache_key(origin, args, typevar_values)] = type_ - - -def _union_orderings_key(typevar_values: Any) -> Any: - """This is intended to help differentiate between Union types with the same arguments in different order. - - Thanks to caching internal to the `typing` module, it is not possible to distinguish between - List[Union[int, float]] and List[Union[float, int]] (and similarly for other "parent" origins besides List) - because `typing` considers Union[int, float] to be equal to Union[float, int]. - - However, you _can_ distinguish between (top-level) Union[int, float] vs. Union[float, int]. - Because we parse items as the first Union type that is successful, we get slightly more consistent behavior - if we make an effort to distinguish the ordering of items in a union. It would be best if we could _always_ - get the exact-correct order of items in the union, but that would require a change to the `typing` module itself. - (See https://github.com/python/cpython/issues/86483 for reference.) - """ - if isinstance(typevar_values, tuple): - args_data = [] - for value in typevar_values: - args_data.append(_union_orderings_key(value)) - return tuple(args_data) - elif typing_extensions.get_origin(typevar_values) is typing.Union: - return get_args(typevar_values) - else: - return () - - -def _early_cache_key(cls: type[BaseModel], typevar_values: Any) -> GenericTypesCacheKey: - """This is intended for minimal computational overhead during lookups of cached types. - - Note that this is overly simplistic, and it's possible that two different cls/typevar_values - inputs would ultimately result in the same type being created in BaseModel.__class_getitem__. - To handle this, we have a fallback _late_cache_key that is checked later if the _early_cache_key - lookup fails, and should result in a cache hit _precisely_ when the inputs to __class_getitem__ - would result in the same type. 
- """ - return cls, typevar_values, _union_orderings_key(typevar_values) - - -def _late_cache_key(origin: type[BaseModel], args: tuple[Any, ...], typevar_values: Any) -> GenericTypesCacheKey: - """This is intended for use later in the process of creating a new type, when we have more information - about the exact args that will be passed. If it turns out that a different set of inputs to - __class_getitem__ resulted in the same inputs to the generic type creation process, we can still - return the cached type, and update the cache with the _early_cache_key as well. - """ - # The _union_orderings_key is placed at the start here to ensure there cannot be a collision with an - # _early_cache_key, as that function will always produce a BaseModel subclass as the first item in the key, - # whereas this function will always produce a tuple as the first item in the key. - return _union_orderings_key(typevar_values), origin, args diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/util/response.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/util/response.py deleted file mode 100644 index 0f4578696fa2e17a900c6890ec26d65e860b0b72..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/util/response.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations - -import http.client as httplib -from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect - -from ..exceptions import HeaderParsingError - - -def is_fp_closed(obj: object) -> bool: - """ - Checks whether a given file-like object is closed. - - :param obj: - The file-like object to check. - """ - - try: - # Check `isclosed()` first, in case Python3 doesn't set `closed`. - # GH Issue #928 - return obj.isclosed() # type: ignore[no-any-return, attr-defined] - except AttributeError: - pass - - try: - # Check via the official file-like-object way. - return obj.closed # type: ignore[no-any-return, attr-defined] - except AttributeError: - pass - - try: - # Check if the object is a container for another file-like object that - # gets released on exhaustion (e.g. HTTPResponse). - return obj.fp is None # type: ignore[attr-defined] - except AttributeError: - pass - - raise ValueError("Unable to determine whether fp is closed.") - - -def assert_header_parsing(headers: httplib.HTTPMessage) -> None: - """ - Asserts whether all headers have been successfully parsed. - Extracts encountered errors from the result of parsing headers. - - Only works on Python 3. - - :param http.client.HTTPMessage headers: Headers to verify. - - :raises urllib3.exceptions.HeaderParsingError: - If parsing errors are found. - """ - - # This will fail silently if we pass in the wrong kind of parameter. - # To make debugging easier add an explicit check. - if not isinstance(headers, httplib.HTTPMessage): - raise TypeError(f"expected httplib.Message, got {type(headers)}.") - - unparsed_data = None - - # get_payload is actually email.message.Message.get_payload; - # we're only interested in the result if it's not a multipart message - if not headers.is_multipart(): - payload = headers.get_payload() - - if isinstance(payload, (bytes, str)): - unparsed_data = payload - - # httplib is assuming a response body is available - # when parsing headers even when httplib only sends - # header data to parse_headers() This results in - # defects on multipart responses in particular. 
- # See: https://github.com/urllib3/urllib3/issues/800 - - # So we ignore the following defects: - # - StartBoundaryNotFoundDefect: - # The claimed start boundary was never found. - # - MultipartInvariantViolationDefect: - # A message claimed to be a multipart but no subparts were found. - defects = [ - defect - for defect in headers.defects - if not isinstance( - defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect) - ) - ] - - if defects or unparsed_data: - raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) - - -def is_response_to_head(response: httplib.HTTPResponse) -> bool: - """ - Checks whether the request of a response has been a HEAD-request. - - :param http.client.HTTPResponse response: - Response to check if the originating request - used 'HEAD' as a method. - """ - # FIXME: Can we do this somehow without accessing private httplib _method? - method_str = response._method # type: str # type: ignore[attr-defined] - return method_str.upper() == "HEAD" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/protocols/utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/protocols/utils.py deleted file mode 100644 index d0697fe73b9389c1e9f58b198f1f44f810439cda..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/protocols/utils.py +++ /dev/null @@ -1,54 +0,0 @@ -import asyncio -import urllib.parse -from typing import Optional, Tuple - -from uvicorn._types import WWWScope - - -def get_remote_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]: - socket_info = transport.get_extra_info("socket") - if socket_info is not None: - try: - info = socket_info.getpeername() - return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None - except OSError: # pragma: no cover - # This case appears to inconsistently occur with uvloop - # bound to a unix domain socket. 
- return None - - info = transport.get_extra_info("peername") - if info is not None and isinstance(info, (list, tuple)) and len(info) == 2: - return (str(info[0]), int(info[1])) - return None - - -def get_local_addr(transport: asyncio.Transport) -> Optional[Tuple[str, int]]: - socket_info = transport.get_extra_info("socket") - if socket_info is not None: - info = socket_info.getsockname() - - return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None - info = transport.get_extra_info("sockname") - if info is not None and isinstance(info, (list, tuple)) and len(info) == 2: - return (str(info[0]), int(info[1])) - return None - - -def is_ssl(transport: asyncio.Transport) -> bool: - return bool(transport.get_extra_info("sslcontext")) - - -def get_client_addr(scope: "WWWScope") -> str: - client = scope.get("client") - if not client: - return "" - return "%s:%d" % client - - -def get_path_with_query_string(scope: "WWWScope") -> str: - path_with_query_string = urllib.parse.quote(scope["path"]) - if scope["query_string"]: - path_with_query_string = "{}?{}".format( - path_with_query_string, scope["query_string"].decode("ascii") - ) - return path_with_query_string diff --git a/spaces/pyodide-demo/self-hosted/python-dateutil.js b/spaces/pyodide-demo/self-hosted/python-dateutil.js deleted file mode 100644 index edf7752b90a322b960dcddc6c99874fdb9329e7e..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/python-dateutil.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="python-dateutil.data";var REMOTE_PACKAGE_BASE="python-dateutil.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var 
data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... ("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","dateutil",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/dateutil","parser",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/dateutil","tz",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/dateutil","zoneinfo",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","python_dateutil-2.8.2-py3.9.egg-info",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var 
compressedData={data:null,cachedOffset:326106,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1372,2834,4242,5632,6601,7642,8492,9576,10170,11108,11869,12474,13260,14092,15488,16646,17676,18669,19851,21197,22280,23464,24652,25425,26345,27097,27942,28941,30001,30946,31676,32516,33335,34550,35883,36876,37728,38683,39683,40626,41781,42810,43855,44945,45940,46694,47700,49022,50442,51919,52976,53752,54877,56025,56994,57911,59046,59932,61103,62309,63469,64312,65271,66321,67314,68146,69297,70541,71693,72813,73996,75244,76484,77373,78156,79019,79967,81403,82610,83689,84724,85850,87014,88249,89568,90776,92006,92965,94163,95194,96329,97711,98893,100127,101230,102147,103381,104744,105837,106996,108132,109180,110294,111478,112566,113811,114873,115951,117388,118398,119576,120844,121982,122782,123540,124652,125932,126915,127702,128803,130182,131591,133013,134140,135308,136630,137967,139155,140561,141907,143174,144511,146354,148402,150450,152507,154562,156610,158607,160664,162712,164760,166817,168865,170913,172961,175009,177057,179105,181153,183209,185266,187314,189362,191410,193458,195506,197554,199602,201659,203707,205755,207803,209851,211885,213933,215981,218029,220077,222134,224182,226239,228287,230335,232383,234440,236488,238536,240593,242641,244689,246744,248799,250847,252895,254943,256994,259042,261095,263152,265116,267171,269219,271267,273315,275363,277415,279463,281511,283551,285599,287640,289694,291742,293790,295847,297895,299952,302e3,304048,306096,308144,310192,312240,314288,316336,318386,320289,321195,322596,324055,325463],sizes:[1372,1462,1408,1390,969,1041,850,1084,594,938,761,605,786,832,1396,1158,1030,993,1182,1346,1083,1184,1188,773,920,752,845,999,1060,945,730,840,819,1215,1333,993,852,955,1e3,943,1155,1029,1045,1090,995,754,1006,1322,1420,1477,1057,776,1125,1148,969,917,1135,886,1171,1206,1160,843,959,1050,993,832,1151,1244,1152,1120,1183,1248,1240,889,783,863,948,1436,1207,1079,1035,1126,1164,1235,1319,1208,1230,959,1198,1031,1135,1382,1182,1234,1103,917,1234,1363,1093,1159,1136,1048,1114,1184,1088,1245,1062,1078,1437,1010,1178,1268,1138,800,758,1112,1280,983,787,1101,1379,1409,1422,1127,1168,1322,1337,1188,1406,1346,1267,1337,1843,2048,2048,2057,2055,2048,1997,2057,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2056,2057,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2034,2048,2048,2048,2048,2057,2048,2057,2048,2048,2048,2057,2048,2048,2057,2048,2048,2055,2055,2048,2048,2048,2051,2048,2053,2057,1964,2055,2048,2048,2048,2048,2052,2048,2048,2040,2048,2041,2054,2048,2048,2057,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2050,1903,906,1401,1459,1408,643],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,1,1,0,0,0,1,0,1,1,1,1,0,0,0,0,1,0,0,1,0,1,1,0,0,1,0,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_python-dateutil.data")}Module["addRunDependency"]("datafile_python-dateutil.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/dateutil/__init__.py",start:0,end:222,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/_common.py",start:222,end:1154,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/_version.py",start:1154,end:1296,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/easter.py",start:1296,end:3974,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/relativedelta.py",start:3974,end:28878,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/rrule.py",start:28878,end:95434,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/tzwin.py",start:95434,end:95493,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/utils.py",start:95493,end:97458,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/parser/__init__.py",start:97458,end:99224,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/parser/_parser.py",start:99224,end:158020,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/parser/isoparser.py",start:158020,end:171267,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/tz/__init__.py",start:171267,end:171711,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/tz/_common.py",start:171711,end:184688,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/tz/_factories.py",start:184688,end:187257,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/tz/tz.py",start:187257,end:250114,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/tz/win.py",start:250114,end:263049,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/zoneinfo/__init__.py",start:263049,end:268938,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/zoneinfo/rebuild.py",start:268938,end:271330,audio:0},{filename:"/lib/python3.9/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz",start:271330,end:445724,audio:0},{filename:"/lib/python3.9/site-packages/python_dateutil-2.8.2-py3.9.egg-info/PKG-INFO",start:445724,end:453915,audio:0},{filename:"/lib/python3.9/site-packages/python_dateutil-2.8.2-py3.9.egg-info/SOURCES.txt",start:453915,end:456072,audio:0},{filename:"/lib/python3.9/site-packages/python_dateutil-2.8.2-py3.9.egg-info/dependency_links.txt",start:456072,end:456073,audio:0},{filename:"/lib/python3.9/site-packages/python_dateutil-2.8.2-py3.9.egg-info/requires.txt",start:456073,end:456082,audio:0},{filename:"/lib/python3.9/site-packages/python_dateutil-2.8.2-py3.9.egg-info/top_level.txt",start:456082,end:456091,audio:0},{filename:"/lib/python3.9/site-packages/python_dateutil-2.8.2-py3.9.egg-info/zip-safe",start:456091,end:456092,audio:0}],remote_package_size:330202,package_uuid:"ce1bacd9-4ab7-4781-b630-713f6808e964"})})(); \ No newline at end of file diff --git a/spaces/pyodide-demo/self-hosted/pywavelets-tests.js b/spaces/pyodide-demo/self-hosted/pywavelets-tests.js deleted file mode 100644 index 1f96fbbb572923dda8a01e01ca8c000ed5a2ca8f..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/pywavelets-tests.js +++ /dev/null @@ -1 +0,0 @@ -var 
Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="pywavelets-tests.data";var REMOTE_PACKAGE_BASE="pywavelets-tests.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... 
("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","pywt",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/pywt","tests",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/pywt/tests","data",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:4075414,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1168,2102,3241,4301,5455,6551,7508,8048,8652,9727,10992,12136,13086,14287,15351,16445,17382,18375,19374,20681,21590,22600,23908,24964,26104,27330,28214,29423,30519,31625,32705,33841,35202,36128,37043,38176,38882,39844,40917,42169,43532,44681,45815,46802,47929,48965,50007,51062,52243,53074,54156,55152,55891,56533,57666,58578,59855,61046,62135,63575,64729,65653,66833,68190,69205,70289,71486,72747,73561,74690,75518,76801,77490,78417,79602,80578,81195,82106,83230,84233,85240,86376,87445,88600,89616,90694,91448,92687,93903,94909,95964,97292,99340,101388,103436,105484,107532,109580,111628,113676,115724,117772,119820,121868,123916,125964,128012,130060,132108,134156,136204,138252,140300,142348,144396,146451,148499,150547,152595,154643,156691,158739,160787,162835,164883,166931,168979,171027,173075,175123,177171,179219,181267,183315,185363,187064,189112,191160,193208,195256,197304,198960,201008,203056,205104,207152,209200,211248,213296,215344,217392,219440,221488,223536,225584,227632,229680,231728,233776,235824,237872,239920,241968,244016,246064,248112,250160,252208,254256,256304,258352,260400,262448,264496,266544,268592,270640,272688,274736,276784,278832,280880,282928,284976,287024,289072,291120,293168,295216,297264,299312,301360,303408,305456,307504,309552,311600,313648,315696,317744,319792,321840,323888,325936,327984,330032,332080,334128,336176,338224,340272,342320,344368,346416,348464,350512,352560,354608,356375,358423,360471,362519,364567,366615,368663,370711,372759,374807,376855,378903,380951,382999,385047,387095,389143,391191,393239,395287,397344,399392,401440,403488,405536,407584,409632,411680,413728,415776,417824,419872,421920,423968,426016,428064,430112,432160,434208,436036,438084,440132,442180,444228,446276,448324,450372,452420,454468,456516,458564,460612,462660,464708,466756,468804,470852,472900,474948,476996,479044,481092,483140,484743,486791,488839,490887,492935,4949
83,497031,499079,500933,502981,505029,507077,509125,511173,513221,515269,517317,519365,521413,523461,525509,527557,529605,531653,533701,535749,537797,539845,541900,543948,545996,548044,550092,552140,554188,556236,558284,560332,562380,564428,566476,568524,570572,572620,574676,576724,578772,580820,582868,584916,586964,589012,591060,593108,594968,597016,599072,601120,603168,605216,607264,609312,611360,613408,615456,617504,619552,621600,623648,625696,627744,629792,631840,633888,635936,637984,640032,642080,644128,646176,648224,650272,652320,654332,656380,658428,660476,662524,664572,666620,668668,670716,672764,674812,676860,678908,680956,683004,685052,687100,689148,691196,693244,695292,697340,699388,701436,703484,705532,707580,709628,711676,713724,715772,717820,719868,721916,723964,726012,728060,730108,732156,734204,736252,738300,740348,742396,744444,746492,748540,750588,752636,754684,756732,758780,760828,762876,764924,766972,769020,771068,773116,775164,777212,779260,781308,783356,785404,787452,789500,791548,793596,795644,797692,799740,801788,803836,805884,807932,809980,812028,814076,816124,818172,820220,822268,824316,826364,828412,830460,832508,834556,836604,838652,840700,842748,844558,846606,848654,850702,852750,854798,856846,858894,860942,862990,865038,867086,869134,871182,873230,875278,877326,879374,881422,883470,885518,887566,889614,891662,893710,895758,897806,899854,901902,903950,905998,908046,910094,912142,914198,916246,918294,920342,922390,924438,926486,928534,930582,932630,934678,936726,938774,940822,942870,944918,946966,949014,951062,953110,955158,957206,959254,961302,963350,965398,967446,969494,971542,973590,975638,977686,979734,981782,983830,985878,987926,989974,992022,994070,996118,998166,1000214,1002270,1004318,1006366,1008414,1010462,1012510,1014558,1016606,1018654,1020702,1022750,1024798,1026846,1028894,1030942,1032990,1035046,1037094,1039142,1041190,1043238,1045286,1047334,1049382,1051430,1053478,1055526,1057574,1059622,1061670,1063718,1065766,1067814,1069862,1071910,1073760,1075808,1077856,1079904,1081952,1084e3,1086048,1088096,1090010,1092058,1094106,1096154,1098202,1100250,1102273,1104321,1106369,1108417,1110465,1112513,1114561,1116609,1118657,1120705,1122753,1124801,1126849,1128897,1130945,1132993,1135041,1137089,1139137,1141185,1143233,1145281,1147329,1149377,1151425,1153473,1155521,1157569,1159617,1161665,1163713,1165761,1167809,1169857,1171905,1173953,1176001,1178049,1180097,1182145,1184193,1186241,1188289,1190337,1192385,1194433,1196481,1198529,1200577,1202625,1204673,1206721,1208769,1210817,1212873,1214921,1216969,1219017,1221065,1223113,1225161,1227209,1229257,1231305,1233353,1235401,1237449,1239497,1241545,1243593,1245641,1247689,1249737,1251785,1253833,1255881,1257929,1259977,1262025,1264073,1266121,1268169,1270217,1272265,1274313,1276361,1278409,1280457,1282505,1284553,1286601,1288649,1290697,1292745,1294793,1296841,1298889,1300937,1302985,1305033,1307081,1309129,1311177,1312844,1314892,1316940,1318988,1321036,1323084,1325132,1327180,1329228,1331276,1333332,1335380,1337241,1339289,1341337,1343385,1345433,1347481,1349529,1351577,1353625,1355673,1357721,1359769,1361817,1363865,1365913,1367961,1370009,1372057,1374105,1376153,1378201,1380249,1382297,1384345,1386393,1388441,1390489,1392537,1394585,1396633,1398681,1400729,1402777,1404825,1406873,1408921,1410969,1413017,1415065,1417113,1419161,1421209,1423257,1425305,1427353,1429401,1431449,1433497,1435545,1437593,1439641,1441689,1443737,1445785,1447833,1449881,1451929,1453977,1456025,1458073,1460121,1461734,1463782,1465830
,1467878,1469926,1471974,1474022,1476070,1478118,1480166,1482214,1484262,1486310,1488358,1490406,1492454,1494502,1496550,1498598,1500646,1502694,1504742,1506790,1508838,1510886,1512934,1514982,1517030,1519078,1521126,1523174,1525222,1527270,1529318,1531366,1533414,1535462,1537510,1539558,1541606,1543654,1545702,1547750,1549798,1551846,1553894,1555942,1557990,1560038,1562086,1564134,1566182,1568230,1570278,1572326,1574374,1576422,1578470,1580518,1582090,1584138,1586186,1588234,1590282,1592330,1594202,1596250,1598298,1600346,1602394,1604442,1606490,1608538,1610586,1612634,1614682,1616730,1618778,1620826,1622874,1624922,1626970,1629018,1631066,1633114,1635162,1637210,1639258,1641306,1643354,1645402,1647450,1649498,1651546,1653594,1655642,1657690,1659738,1661786,1663834,1665882,1667930,1669978,1672026,1674074,1675981,1678029,1680077,1682125,1684173,1686221,1688269,1690317,1692365,1694413,1696470,1698518,1700566,1702614,1704670,1706718,1708766,1710814,1712870,1714918,1716966,1719014,1721062,1723110,1725158,1727206,1729254,1731280,1733328,1735376,1737424,1739472,1741520,1743568,1745616,1747664,1749712,1751760,1753808,1755856,1757904,1759952,1762e3,1764048,1766096,1768144,1770192,1772240,1774288,1776336,1778384,1780432,1782480,1784528,1786576,1788624,1790672,1792720,1794768,1796816,1798864,1800912,1802960,1805008,1807056,1808851,1810899,1812947,1814995,1817043,1819091,1820668,1822716,1824764,1826812,1828860,1830908,1832956,1835004,1837052,1839100,1841148,1843196,1845244,1847292,1849340,1851388,1853436,1855484,1857532,1859580,1861628,1863676,1865724,1867772,1869820,1871868,1873916,1875964,1878012,1880060,1882108,1884156,1886204,1888252,1890300,1892348,1894396,1896444,1898492,1900540,1902588,1904525,1905393,1906252,1907106,1907625,1907891,1908157,1908524,1908854,1909223,1909592,1910099,1910582,1911112,1911688,1912196,1912773,1913359,1913991,1914637,1915192,1915796,1916466,1917113,1917786,1918452,1919131,1919810,1920469,1921170,1921952,1922711,1923444,1924177,1924968,1925730,1926420,1927171,1927998,1928840,1929687,1930517,1931279,1932199,1933021,1933771,1934566,1935422,1936271,1937175,1938090,1938996,1939910,1940843,1941751,1942509,1943351,1944340,1945308,1946241,1947189,1948053,1949038,1949999,1950985,1951839,1952763,1953730,1954789,1955770,1956778,1957724,1958767,1959877,1960925,1961948,1962903,1963820,1964985,1966060,1967126,1968265,1969244,1970324,1971436,1972534,1973647,1974547,1975570,1976664,1977783,1978870,1979978,1981074,1982065,1983198,1984304,1985428,1986421,1987404,1988363,1989450,1990587,1991762,1992876,1993978,1995055,1996191,1997268,1998492,1999617,2000570,2001696,2002827,2004097,2005175,2006405,2007520,2008654,2009795,2011015,2012223,2013263,2014343,2015354,2016375,2017593,2018746,2019859,2021099,2022288,2023345,2024421,2025628,2026901,2028187,2029399,2030241,2031405,2032553,2033816,2035101,2036353,2037583,2038856,2039995,2041209,2042549,2043855,2045079,2046246,2047231,2048358,2049561,2050841,2052154,2053454,2054741,2056026,2057207,2058435,2059646,2060902,2062293,2063688,2064625,2065856,2067017,2068440,2069738,2070950,2072247,2073635,2074966,2076133,2077486,2078813,2080202,2081643,2082927,2083854,2085115,2086298,2087762,2089066,2090428,2091658,2093049,2094362,2095667,2096937,2098266,2099595,2100991,2102331,2103567,2104678,2105994,2107210,2108609,2109980,2111367,2112846,2114262,2115741,2117002,2118278,2119695,2121088,2122420,2123862,2125076,2126227,2127523,2128854,2130102,2131439,2132800,2134197,2135563,2136939,2138329,2139724,2141110,2142508,2143937,2145350,2146744,2147613,2148958,215
0143,2151611,2153103,2154605,2156124,2157639,2159171,2160681,2162053,2163380,2164800,2166242,2167627,2169064,2170478,2171453,2172870,2174172,2175627,2177118,2178583,2180043,2181495,2182932,2184375,2185839,2187200,2188721,2190255,2191765,2193271,2194759,2196095,2197169,2198646,2200018,2201646,2203303,2204892,2206409,2207920,2209426,2210970,2212460,2213890,2215522,2217103,2218618,2220125,2221684,2223088,2224049,2225511,2226834,2228314,2229917,2231444,2232886,2234307,2235849,2237292,2238729,2240014,2241479,2242968,2244408,2245866,2247490,2248983,2250189,2251516,2252920,2254414,2255988,2257694,2259213,2260799,2262358,2263808,2265377,2266833,2268130,2269622,2271125,2272608,2274221,2275734,2277421,2278810,2279985,2281569,2282978,2284527,2286154,2287617,2289173,2290651,2292312,2293850,2295563,2297057,2298519,2300219,2301758,2303404,2304904,2306477,2308039,2309231,2310683,2312177,2313718,2315409,2316939,2318591,2320121,2321666,2323292,2324832,2326539,2328078,2329607,2331201,2332868,2334340,2335906,2337537,2339087,2340478,2341643,2343264,2344611,2346260,2347837,2349332,2350951,2352617,2354222,2355868,2357427,2358939,2360300,2361837,2363502,2365173,2366775,2368434,2370048,2371535,2372779,2374236,2375787,2377320,2378937,2380524,2382150,2383761,2385360,2387019,2388671,2390274,2391815,2393244,2394871,2396435,2398174,2399822,2401446,2403009,2404643,2406130,2407337,2408974,2410326,2411962,2413610,2415216,2416908,2418568,2420204,2421876,2423538,2425149,2426839,2428321,2429895,2431472,2433173,2434861,2436519,2438155,2439820,2441007,2442166,2443840,2445351,2446970,2448560,2450218,2451921,2453596,2455244,2456907,2458616,2460282,2461954,2463279,2464944,2466614,2468344,2469996,2471616,2473286,2474992,2476435,2477378,2479002,2480431,2482100,2483817,2485487,2487114,2488739,2490448,2492143,2493795,2495470,2497170,2498710,2500222,2501868,2503508,2505212,2506903,2508584,2510261,2511945,2513412,2514404,2516052,2517556,2519232,2520941,2522617,2524261,2525887,2527592,2529259,2530926,2532608,2534321,2536014,2537506,2539163,2540830,2542557,2544213,2545859,2547560,2549310,2551042,2552001,2553461,2555018,2556550,2558315,2560040,2561775,2563446,2565079,2566765,2568467,2570142,2571804,2573580,2575257,2576781,2578453,2580223,2582007,2583699,2585334,2587013,2588714,2590392,2591389,2592889,2594469,2596048,2597800,2599493,2601214,2602880,2604516,2606289,2608102,2609765,2611460,2613174,2614878,2616388,2618065,2619818,2621652,2623308,2624927,2626631,2628405,2630173,2631541,2632683,2634332,2635882,2637553,2639325,2641098,2642787,2644486,2646160,2647979,2649686,2651386,2653051,2654899,2656540,2658157,2659839,2661521,2663253,2665089,2666691,2668401,2670099,2671928,2673321,2674456,2676225,2677846,2679510,2681282,2682963,2684678,2686533,2688176,2689885,2691726,2693463,2695156,2696917,2698713,2700214,2701945,2703655,2705415,2707214,2708905,2710620,2712436,2714143,2715861,2716928,2718337,2720055,2721580,2723399,2725145,2726853,2728656,2730370,2732048,2733893,2735611,2737297,2739137,2740838,2742545,2744072,2745924,2747642,2749335,2751198,2752873,2754603,2756417,2758124,2759974,2761089,2762502,2764344,2765871,2767577,2769278,2771126,2772831,2774661,2776372,2778087,2779958,2781644,2783492,2785209,2786934,2788626,2790335,2792034,2793865,2794815,2795229,2795672,2796120,2796579,2797041,2797542,2798077,2798575,2799047,2799574,2800100,2800609,2801175,2801726,2802286,2802796,2803337,2803945,2804595,2805182,2805765,2806330,2806904,2807500,2808095,2808631,2809215,2809918,2810585,2811272,2811909,2812629,2813292,2813914,2814455,2815037,2815705,281641
9,2817173,2817949,2818646,2819416,2820085,2820689,2821286,2822051,2822887,2823589,2824300,2825028,2825815,2826568,2827298,2828035,2828741,2829406,2830202,2831008,2831779,2832552,2833388,2834282,2835258,2836280,2837092,2837982,2838984,284e4,2840997,2842057,2843074,2844029,2845039,2846113,2847184,2848098,2849096,2850043,2851130,2852176,2853242,2854304,2855310,2856321,2857425,2858519,2859672,2860563,2861562,2862592,2863722,2864907,2866013,2867093,2868171,2869207,2870282,2871492,2872609,2873614,2874622,2875647,2876866,2877952,2879192,2880296,2881483,2882538,2883692,2884955,2886034,2887240,2888226,2889226,2890306,2891478,2892627,2893893,2895123,2896311,2897416,2898506,2899754,2901027,2902248,2903345,2904325,2905443,2906667,2907972,2909223,2910443,2911718,2913014,2914166,2915442,2916759,2918002,2919224,2920386,2921482,2922614,2923857,2925181,2926525,2927816,2929105,2930418,2931560,2932774,2934031,2935446,2936819,2938064,2938877,2940124,2941384,2942634,2943852,2945168,2946522,2947852,2949292,2950461,2951731,2953148,2954539,2955868,2957251,2958034,2959289,2960565,2961870,2963266,2964452,2965816,2967089,2968555,2969774,2971079,2972318,2973742,2975057,2976554,2977714,2978886,2980108,2981492,2982929,2984291,2985774,2987252,2988677,2990180,2991365,2992777,2994245,2995628,2997081,2998588,2999781,3000920,3002244,3003610,3004930,3006343,3007750,3009200,3010599,3012043,3013452,3014875,3016268,3017684,3019112,3020573,3021993,3022886,3024263,3025460,3026954,3028468,3029976,3031484,3033006,3034528,3036059,3037425,3038756,3040225,3041028,3041617,3042158,3042790,3043416,3044060,3044708,3045358,3046191,3047091,3047817,3048700,3049641,3050530,3051459,3052346,3053199,3054208,3055215,3056292,3057108,3058080,3059148,3060208,3061242,3062339,3063410,3064398,3065548,3066765,3068017,3069093,3070210,3071404,3072604,3073951,3075152,3076503,3077714,3078925,3080184,3081547,3082969,3084384,3085595,3086779,3087989,3089406,3090828,3092179,3093547,3094928,3096354,3097633,3099079,3100496,3101855,3103195,3104454,3105265,3106178,3107105,3108406,3109799,3111189,3112451,3113560,3114646,3115589,3116508,3117836,3119324,3120852,3122372,3123912,3125135,3126345,3127576,3128636,3129837,3131049,3132269,3133497,3134726,3135946,3137163,3138094,3138997,3139912,3140878,3141964,3143249,3144594,3145710,3147078,3148475,3149789,3151286,3152997,3154545,3155898,3157187,3158265,3159255,3160237,3161198,3162191,3163167,3164315,3165802,3167492,3169023,3170295,3170956,3172016,3173007,3174061,3175347,3176886,3178583,3180044,3181126,3182190,3183467,3185004,3186539,3188057,3189761,3191259,3192401,3193541,3195168,3196870,3198178,3198862,3200246,3201583,3203025,3204708,3206132,3207295,3208624,3210330,3211968,3213155,3214328,3215978,3217687,3219265,3220843,3222004,3223249,3224944,3226504,3227745,3229252,3230675,3231478,3232721,3233967,3235450,3237130,3238456,3239715,3241505,3243250,3244500,3245862,3247564,3248994,3250249,3251457,3252925,3254609,3255923,3257253,3258950,3260448,3261783,3263597,3265217,3266011,3267610,3269272,3270651,3271990,3273529,3275309,3276654,3278098,3279767,3281107,3282486,3284198,3285544,3286901,3288514,3290192,3291597,3292936,3294644,3296097,3297521,3299231,3300657,3302220,3303791,3304626,3306334,3308070,3309291,3310728,3312563,3314141,3315583,3317257,3318676,3320136,3321926,3323374,3325143,3326953,3328402,3329862,3331536,3333062,3334497,3336219,3337733,3339465,3341264,3342790,3344486,3346002,3347036,3348261,3349793,3351354,3353086,3354604,3356259,3358149,3359693,3361410,3362921,3364592,3366448,3367976,3369719,3371253,3372950,3374917,337
6106,3377633,3379315,3380850,3382701,3384369,3385958,3387772,3389398,3391213,3392838,3393989,3394991,3396621,3398297,3399665,3401281,3402993,3404620,3406471,3408128,3409745,3411584,3413214,3414935,3416572,3418381,3420012,3421668,3423509,3424589,3426229,3428060,3429708,3431504,3433215,3434975,3436651,3438499,3440223,3441946,3443792,3444754,3446241,3447955,3449680,3451390,3453108,3454917,3456635,3458493,3460217,3462033,3463738,3465497,3467374,3469105,3470910,3472631,3474486,3476215,3477986,3479769,3481519,3483358,3485085,3486944,3488760,3490561,3492321,3494177,3495989,3497845,3499651,3500667,3501680,3503489,3505375,3506397,3508254,3510075,3511915,3513820,3515654,3517477,3519284,3521250,3523094,3524954,3526784,3528648,3530484,3532325,3534156,3535925,3537171,3539002,3540870,3542710,3544286,3544566,3544846,3545122,3545503,3545884,3546263,3546727,3547142,3547618,3548078,3548585,3549051,3549565,3550070,3550660,3551180,3551718,3552227,3552783,3553354,3553947,3554549,3555122,3555723,3556200,3556698,3557241,3557788,3558285,3558804,3559358,3559971,3560517,3561054,3561554,3562116,3562670,3563239,3563810,3564379,3565057,3565751,3566427,3566976,3567588,3568327,3568993,3569630,3570284,3570927,3571660,3572347,3573064,3573744,3574372,3575149,3576018,3576788,3577561,3578328,3579053,3579878,3580317,3580809,3581321,3581870,3582358,3582916,3583431,3584043,3584661,3585248,3585850,3586513,3587069,3587694,3588355,3589023,3589687,3590333,3590928,3591581,3592312,3592941,3593695,3594439,3595208,3596011,3596672,3597394,3598048,3598745,3599578,3600326,3601076,3601884,3602696,3603432,3604248,3605009,3605767,3606493,3607258,3608121,3608852,3609607,3610498,3611302,3612096,3612954,3613619,3614231,3614758,3615377,3616108,3616764,3617385,3618051,3618736,3619437,3620026,3620584,3621162,3621802,3622495,3623212,3623923,3624605,3625320,3626094,3626881,3627545,3628321,3629159,3629913,3630668,3631385,3632158,3632871,3633467,3633741,3634015,3634315,3634640,3635024,3635404,3635891,3636382,3636854,3637303,3637855,3638370,3638897,3639376,3639970,3640570,3641130,3641721,3642370,3642972,3643556,3644183,3644721,3645364,3645865,3646357,3646880,3647449,3647980,3648535,3649063,3649674,3650246,3650778,3651386,3652036,3652649,3653226,3653846,3654421,3655090,3655727,3656372,3656984,3657643,3658362,3659017,3659743,3660488,3661159,3661931,3662719,3663497,3664213,3664968,3665741,3666520,3667268,3668009,3668859,3669572,3670365,3670859,3671365,3671895,3672450,3672958,3673525,3674046,3674713,3675322,3675864,3676463,3677122,3677773,3678467,3679136,3679870,3680546,3681200,3681857,3682508,3683249,3683908,3684604,3685407,3686115,3686909,3687601,3688293,3689086,3689828,3690637,3691436,3692188,3693001,3693807,3694661,3695445,3696259,3696997,3697819,3698579,3699532,3700306,3701135,3701993,3702885,3703713,3704517,3705116,3705653,3706169,3706706,3707268,3707841,3708472,3709078,3709777,3710434,3711101,3711728,3712391,3713119,3713749,3714326,3714990,3715649,3716439,3717191,3717876,3718537,3719229,3720113,3720904,3721649,3722406,3723105,3723940,3724689,3725242,3725931,3726675,3727413,3728177,3728946,3729706,3730480,3731249,3732004,3732768,3733522,3734286,3735061,3735808,3736550,3737319,3738078,3738843,3739603,3740355,3741097,3741837,3742590,3743353,3744099,3744833,3745585,3746343,3747089,3747840,3748573,3749315,3750073,3750829,3751591,3752332,3753056,3753803,3754560,3755319,3756068,3756790,3757536,3758291,3759049,3759795,3760534,3761285,3762036,3762799,3763558,3764283,3765020,3765772,3766526,3767271,3768029,3768760,3769520,3770275,3771032,3771786,3772526,377327
4,3774016,3774778,3775538,3776278,3777017,3777780,3778530,3779289,3780030,3780768,3781520,3782271,3783026,3783774,3784512,3785256,3786002,3786764,3787518,3788262,3789004,3789759,3790516,3791263,3792007,3792740,3793474,3794169,3794835,3795512,3796173,3796823,3797491,3798150,3798817,3799469,3800118,3800775,3801427,3802093,3802749,3803396,3804039,3804696,3805412,3806166,3806920,3807650,3808401,3809157,3809906,3810640,3811367,3812094,3812838,3813581,3814333,3815047,3815790,3816538,3817282,3818024,3818758,3819500,3820251,3820997,3821736,3822452,3823184,3823926,3824677,3825422,3826149,3826882,3827651,3828399,3829159,3829889,3830630,3831374,3832120,3832877,3833596,3834325,3835063,3835812,3836548,3837187,3837811,3838463,3839127,3839787,3840425,3841055,3841717,3842379,3843040,3843676,3844289,3844939,3845587,3846245,3846880,3847524,3848176,3848824,3849461,3850076,3850719,3851386,3852046,3852707,3853326,3853970,3854623,3855263,3855889,3856309,3856897,3857517,3858173,3858805,3859468,3860158,3860883,3861575,3862250,3862931,3863607,3864283,3864966,3865629,3866319,3867006,3867707,3868409,3869141,3869872,3870611,3871354,3872067,3872800,3873543,3874284,3874986,3875715,3876426,3877090,3877743,3878371,3879015,3879654,3880311,3880948,3881575,3882102,3882625,3883255,3883883,3884531,3885217,3885918,3886619,3887288,3887975,3888665,3889353,3890037,3890700,3891382,3892069,3892754,3893418,3894143,3894873,3895615,3896326,3897040,3897738,3898470,3899209,3899911,3900631,3901365,3902072,3902730,3903355,3904004,3904667,3905321,3905956,3906595,3907242,3907804,3907874,3907932,3908009,3908619,3910667,3912715,3914763,3916776,3918824,3920872,3922920,3924968,3927016,3929064,3931112,3933052,3934092,3935135,3936174,3937260,3939296,3941344,3943392,3945440,3947443,3949491,3951539,3953587,3955535,3957480,3959387,3961349,3963308,3965207,3967153,3969014,3971012,3973060,3975108,3977156,3979204,3981252,3983300,3985348,3987362,3989410,3991458,3993506,3995516,3997564,3999612,4001660,4002873,4004921,4006969,4008353,4009365,4011413,4013461,4015509,4017522,4019570,4021618,4023666,4025672,4027690,4029702,4031750,4033646,4035694,4037720,4039768,4041812,4043747,4045795,4046846,4048068,4049266,4051321,4053369,4054943,4056712,4058398,4060302,4062128,4064132,4066180,4068007,4070055,4071466,4072696,4073646,4074905],sizes:[1168,934,1139,1060,1154,1096,957,540,604,1075,1265,1144,950,1201,1064,1094,937,993,999,1307,909,1010,1308,1056,1140,1226,884,1209,1096,1106,1080,1136,1361,926,915,1133,706,962,1073,1252,1363,1149,1134,987,1127,1036,1042,1055,1181,831,1082,996,739,642,1133,912,1277,1191,1089,1440,1154,924,1180,1357,1015,1084,1197,1261,814,1129,828,1283,689,927,1185,976,617,911,1124,1003,1007,1136,1069,1155,1016,1078,754,1239,1216,1006,1055,1328,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2055,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1701,2048,2048,2048,2048,2048,1656,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1767,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,204
8,2048,1828,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1603,2048,2048,2048,2048,2048,2048,2048,1854,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2055,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2048,1860,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2012,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1810,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1850,2048,2048,2048,2048,2048,2048,2048,1914,2048,2048,2048,2048,2048,2023,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1667,2048,2048,2048,2048,2048,2048,2048,2048,2048,2056,2048,1861,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1613,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1572,2048,2048,2048,2048,2048,1872,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1907,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2056,2048,2048,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2026,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1795,2048,2048,2048,2048,2048,1577,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,204
8,2048,2048,2048,2048,2048,2048,2048,2048,2048,1937,868,859,854,519,266,266,367,330,369,369,507,483,530,576,508,577,586,632,646,555,604,670,647,673,666,679,679,659,701,782,759,733,733,791,762,690,751,827,842,847,830,762,920,822,750,795,856,849,904,915,906,914,933,908,758,842,989,968,933,948,864,985,961,986,854,924,967,1059,981,1008,946,1043,1110,1048,1023,955,917,1165,1075,1066,1139,979,1080,1112,1098,1113,900,1023,1094,1119,1087,1108,1096,991,1133,1106,1124,993,983,959,1087,1137,1175,1114,1102,1077,1136,1077,1224,1125,953,1126,1131,1270,1078,1230,1115,1134,1141,1220,1208,1040,1080,1011,1021,1218,1153,1113,1240,1189,1057,1076,1207,1273,1286,1212,842,1164,1148,1263,1285,1252,1230,1273,1139,1214,1340,1306,1224,1167,985,1127,1203,1280,1313,1300,1287,1285,1181,1228,1211,1256,1391,1395,937,1231,1161,1423,1298,1212,1297,1388,1331,1167,1353,1327,1389,1441,1284,927,1261,1183,1464,1304,1362,1230,1391,1313,1305,1270,1329,1329,1396,1340,1236,1111,1316,1216,1399,1371,1387,1479,1416,1479,1261,1276,1417,1393,1332,1442,1214,1151,1296,1331,1248,1337,1361,1397,1366,1376,1390,1395,1386,1398,1429,1413,1394,869,1345,1185,1468,1492,1502,1519,1515,1532,1510,1372,1327,1420,1442,1385,1437,1414,975,1417,1302,1455,1491,1465,1460,1452,1437,1443,1464,1361,1521,1534,1510,1506,1488,1336,1074,1477,1372,1628,1657,1589,1517,1511,1506,1544,1490,1430,1632,1581,1515,1507,1559,1404,961,1462,1323,1480,1603,1527,1442,1421,1542,1443,1437,1285,1465,1489,1440,1458,1624,1493,1206,1327,1404,1494,1574,1706,1519,1586,1559,1450,1569,1456,1297,1492,1503,1483,1613,1513,1687,1389,1175,1584,1409,1549,1627,1463,1556,1478,1661,1538,1713,1494,1462,1700,1539,1646,1500,1573,1562,1192,1452,1494,1541,1691,1530,1652,1530,1545,1626,1540,1707,1539,1529,1594,1667,1472,1566,1631,1550,1391,1165,1621,1347,1649,1577,1495,1619,1666,1605,1646,1559,1512,1361,1537,1665,1671,1602,1659,1614,1487,1244,1457,1551,1533,1617,1587,1626,1611,1599,1659,1652,1603,1541,1429,1627,1564,1739,1648,1624,1563,1634,1487,1207,1637,1352,1636,1648,1606,1692,1660,1636,1672,1662,1611,1690,1482,1574,1577,1701,1688,1658,1636,1665,1187,1159,1674,1511,1619,1590,1658,1703,1675,1648,1663,1709,1666,1672,1325,1665,1670,1730,1652,1620,1670,1706,1443,943,1624,1429,1669,1717,1670,1627,1625,1709,1695,1652,1675,1700,1540,1512,1646,1640,1704,1691,1681,1677,1684,1467,992,1648,1504,1676,1709,1676,1644,1626,1705,1667,1667,1682,1713,1693,1492,1657,1667,1727,1656,1646,1701,1750,1732,959,1460,1557,1532,1765,1725,1735,1671,1633,1686,1702,1675,1662,1776,1677,1524,1672,1770,1784,1692,1635,1679,1701,1678,997,1500,1580,1579,1752,1693,1721,1666,1636,1773,1813,1663,1695,1714,1704,1510,1677,1753,1834,1656,1619,1704,1774,1768,1368,1142,1649,1550,1671,1772,1773,1689,1699,1674,1819,1707,1700,1665,1848,1641,1617,1682,1682,1732,1836,1602,1710,1698,1829,1393,1135,1769,1621,1664,1772,1681,1715,1855,1643,1709,1841,1737,1693,1761,1796,1501,1731,1710,1760,1799,1691,1715,1816,1707,1718,1067,1409,1718,1525,1819,1746,1708,1803,1714,1678,1845,1718,1686,1840,1701,1707,1527,1852,1718,1693,1863,1675,1730,1814,1707,1850,1115,1413,1842,1527,1706,1701,1848,1705,1830,1711,1715,1871,1686,1848,1717,1725,1692,1709,1699,1831,950,414,443,448,459,462,501,535,498,472,527,526,509,566,551,560,510,541,608,650,587,583,565,574,596,595,536,584,703,667,687,637,720,663,622,541,582,668,714,754,776,697,770,669,604,597,765,836,702,711,728,787,753,730,737,706,665,796,806,771,773,836,894,976,1022,812,890,1002,1016,997,1060,1017,955,1010,1074,1071,914,998,947,1087,1046,1066,1062,1006,1011,1104,1094,1153,891,999,1030,1130,1185,1106,1080,1078,1036,1075,
1210,1117,1005,1008,1025,1219,1086,1240,1104,1187,1055,1154,1263,1079,1206,986,1e3,1080,1172,1149,1266,1230,1188,1105,1090,1248,1273,1221,1097,980,1118,1224,1305,1251,1220,1275,1296,1152,1276,1317,1243,1222,1162,1096,1132,1243,1324,1344,1291,1289,1313,1142,1214,1257,1415,1373,1245,813,1247,1260,1250,1218,1316,1354,1330,1440,1169,1270,1417,1391,1329,1383,783,1255,1276,1305,1396,1186,1364,1273,1466,1219,1305,1239,1424,1315,1497,1160,1172,1222,1384,1437,1362,1483,1478,1425,1503,1185,1412,1468,1383,1453,1507,1193,1139,1324,1366,1320,1413,1407,1450,1399,1444,1409,1423,1393,1416,1428,1461,1420,893,1377,1197,1494,1514,1508,1508,1522,1522,1531,1366,1331,1469,803,589,541,632,626,644,648,650,833,900,726,883,941,889,929,887,853,1009,1007,1077,816,972,1068,1060,1034,1097,1071,988,1150,1217,1252,1076,1117,1194,1200,1347,1201,1351,1211,1211,1259,1363,1422,1415,1211,1184,1210,1417,1422,1351,1368,1381,1426,1279,1446,1417,1359,1340,1259,811,913,927,1301,1393,1390,1262,1109,1086,943,919,1328,1488,1528,1520,1540,1223,1210,1231,1060,1201,1212,1220,1228,1229,1220,1217,931,903,915,966,1086,1285,1345,1116,1368,1397,1314,1497,1711,1548,1353,1289,1078,990,982,961,993,976,1148,1487,1690,1531,1272,661,1060,991,1054,1286,1539,1697,1461,1082,1064,1277,1537,1535,1518,1704,1498,1142,1140,1627,1702,1308,684,1384,1337,1442,1683,1424,1163,1329,1706,1638,1187,1173,1650,1709,1578,1578,1161,1245,1695,1560,1241,1507,1423,803,1243,1246,1483,1680,1326,1259,1790,1745,1250,1362,1702,1430,1255,1208,1468,1684,1314,1330,1697,1498,1335,1814,1620,794,1599,1662,1379,1339,1539,1780,1345,1444,1669,1340,1379,1712,1346,1357,1613,1678,1405,1339,1708,1453,1424,1710,1426,1563,1571,835,1708,1736,1221,1437,1835,1578,1442,1674,1419,1460,1790,1448,1769,1810,1449,1460,1674,1526,1435,1722,1514,1732,1799,1526,1696,1516,1034,1225,1532,1561,1732,1518,1655,1890,1544,1717,1511,1671,1856,1528,1743,1534,1697,1967,1189,1527,1682,1535,1851,1668,1589,1814,1626,1815,1625,1151,1002,1630,1676,1368,1616,1712,1627,1851,1657,1617,1839,1630,1721,1637,1809,1631,1656,1841,1080,1640,1831,1648,1796,1711,1760,1676,1848,1724,1723,1846,962,1487,1714,1725,1710,1718,1809,1718,1858,1724,1816,1705,1759,1877,1731,1805,1721,1855,1729,1771,1783,1750,1839,1727,1859,1816,1801,1760,1856,1812,1856,1806,1016,1013,1809,1886,1022,1857,1821,1840,1905,1834,1823,1807,1966,1844,1860,1830,1864,1836,1841,1831,1769,1246,1831,1868,1840,1576,280,280,276,381,381,379,464,415,476,460,507,466,514,505,590,520,538,509,556,571,593,602,573,601,477,498,543,547,497,519,554,613,546,537,500,562,554,569,571,569,678,694,676,549,612,739,666,637,654,643,733,687,717,680,628,777,869,770,773,767,725,825,439,492,512,549,488,558,515,612,618,587,602,663,556,625,661,668,664,646,595,653,731,629,754,744,769,803,661,722,654,697,833,748,750,808,812,736,816,761,758,726,765,863,731,755,891,804,794,858,665,612,527,619,731,656,621,666,685,701,589,558,578,640,693,717,711,682,715,774,787,664,776,838,754,755,717,773,713,596,274,274,300,325,384,380,487,491,472,449,552,515,527,479,594,600,560,591,649,602,584,627,538,643,501,492,523,569,531,555,528,611,572,532,608,650,613,577,620,575,669,637,645,612,659,719,655,726,745,671,772,788,778,716,755,773,779,748,741,850,713,793,494,506,530,555,508,567,521,667,609,542,599,659,651,694,669,734,676,654,657,651,741,659,696,803,708,794,692,692,793,742,809,799,752,813,806,854,784,814,738,822,760,953,774,829,858,892,828,804,599,537,516,537,562,573,631,606,699,657,667,627,663,728,630,577,664,659,790,752,685,661,692,884,791,745,757,699,835,749,553,689,744,738,764,769,760,774,769,755,764,754,764,775,7
47,742,769,759,765,760,752,742,740,753,763,746,734,752,758,746,751,733,742,758,756,762,741,724,747,757,759,749,722,746,755,758,746,739,751,751,763,759,725,737,752,754,745,758,731,760,755,757,754,740,748,742,762,760,740,739,763,750,759,741,738,752,751,755,748,738,744,746,762,754,744,742,755,757,747,744,733,734,695,666,677,661,650,668,659,667,652,649,657,652,666,656,647,643,657,716,754,754,730,751,756,749,734,727,727,744,743,752,714,743,748,744,742,734,742,751,746,739,716,732,742,751,745,727,733,769,748,760,730,741,744,746,757,719,729,738,749,736,639,624,652,664,660,638,630,662,662,661,636,613,650,648,658,635,644,652,648,637,615,643,667,660,661,619,644,653,640,626,420,588,620,656,632,663,690,725,692,675,681,676,676,683,663,690,687,701,702,732,731,739,743,713,733,743,741,702,729,711,664,653,628,644,639,657,637,627,527,523,630,628,648,686,701,701,669,687,690,688,684,663,682,687,685,664,725,730,742,711,714,698,732,739,702,720,734,707,658,625,649,663,654,635,639,647,562,70,58,77,610,2048,2048,2048,2013,2048,2048,2048,2048,2048,2048,2048,1940,1040,1043,1039,1086,2036,2048,2048,2048,2003,2048,2048,2048,1948,1945,1907,1962,1959,1899,1946,1861,1998,2048,2048,2048,2048,2048,2048,2048,2014,2048,2048,2048,2010,2048,2048,2048,1213,2048,2048,1384,1012,2048,2048,2048,2013,2048,2048,2048,2006,2018,2012,2048,1896,2048,2026,2048,2044,1935,2048,1051,1222,1198,2055,2048,1574,1769,1686,1904,1826,2004,2048,1827,2048,1411,1230,950,1259,509],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,1,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,1,1,0,0,0,1,0,0,0,1,1,1,0,1,0,1,0,1,1,0,1,1,1,1,0,1,1,1,1,1,1,0,1,0,1,1,1,1,1]};
compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 ?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_pywavelets-tests.data")}Module["addRunDependency"]("datafile_pywavelets-tests.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/pywt/conftest.py",start:0,end:143,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test__pywt.py",start:143,end:5612,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_concurrent.py",start:5612,end:9599,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_cwt_wavelets.py",start:9599,end:23188,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_data.py",start:23188,end:25454,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_deprecations.py",start:25454,end:27674,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_doc.py",start:27674,end:28296,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_dwt_idwt.py",start:28296,end:38648,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_functions.py",start:38648,end:39811,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_matlab_compatibility.py",start:39811,end:45696,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_matlab_compatibility_cwt.py",start:45696,end:51979,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_modes.py",start:51979,end:56827,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_mra.py",start:56827,end:65755,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_multidim.py",start:65755,end:80688,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_multilevel.py",start:80688,end:119713,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_perfect_reconstruction.py",start:119713,end:121508,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_swt.py",start:121508,end:146362,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_thresholding.py",start:146362,end:152895,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_wavelet.py",start:152895,end:164384,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_wp.py",start:164384,end:172399,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_wp2d.py",start:172399,end:181793,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/test_wpnd.py",start:181793,end:188045,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/data/cwt_matlabR2015b_result.npz",start:188045,end:2007551,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/data/dwt_matlabR2012a_result.npz",start:2007551,end:5598421,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/data/wavelab_test_signals.npz",start:5598421,end:5783239,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/data/generate_matlab_data.py",start:5783239,end:5787196,audio:0},{filename:"/lib/python3.9/site-packages/pywt/tests/data/generate_matlab_data_cwt.py",start:5787196,end:5790444,audio:0}],remote_package_size:4079510,package_uuid:"e0ad0268-379f-4b7f-8b74-4bd4ea9941fc"})})();
 \ No newline at end of file diff --git a/spaces/pytorch/transformers/README.md b/spaces/pytorch/transformers/README.md deleted file mode 100644 index ed58c0b085343013a4726771b5fd3782983134d1..0000000000000000000000000000000000000000 --- a/spaces/pytorch/transformers/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Transformers -emoji: 🌍 -colorFrom: pink -colorTo: yellow -sdk: gradio -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/qkorbit/AltDiffusion/app.py b/spaces/qkorbit/AltDiffusion/app.py deleted file mode 100644 index 76ac5793dd785d1f46837f0bcaeeab37d0bbb3ca..0000000000000000000000000000000000000000 --- a/spaces/qkorbit/AltDiffusion/app.py +++ /dev/null @@ -1,330 +0,0 @@ -import io -import re -import imp -import time -import json -import base64 -import requests -import gradio as gr -import ui_functions as uifn -from css_and_js import js, call_JS -from PIL import Image, PngImagePlugin, ImageChops - -url_host = "http://flagstudio.baai.ac.cn" -token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiZjAxOGMxMzJiYTUyNDBjMzk5NTMzYTI5YjBmMzZiODMiLCJhcHBfbmFtZSI6IndlYiIsImlkZW50aXR5X3R5cGUiOiIyIiwidXNlcl9yb2xlIjoiMiIsImp0aSI6IjVjMmQzMjdiLWI5Y2MtNDhiZS1hZWQ4LTllMjQ4MDk4NzMxYyIsIm5iZiI6MTY2OTAwNjE5NywiZXhwIjoxOTg0MzY2MTk3LCJpYXQiOjE2NjkwMDYxOTd9.9B3MDk8wA6iWH5puXjcD19tJJ4Ox7mdpRyWZs5Kwt70" - -def read_content(file_path: str) -> str: - """read the content of target file - """ - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - return content - -def filter_content(raw_style: str): - if "(" in raw_style: - i = raw_style.index("(") - else : - i = -1 - - if i == -1: - return raw_style - else : - return raw_style[:i] - -def upload_image(img): - url = url_host + "/api/v1/image/get-upload-link" - headers = {"token": token} - r = requests.post(url, json={}, headers=headers) - if r.status_code != 200: - raise gr.Error(r.reason) - head_res = r.json() - if head_res["code"] != 0: - raise gr.Error("Unknown error") - image_id = head_res["data"]["image_id"] - image_url = head_res["data"]["url"] - image_headers = head_res["data"]["headers"] - - imgBytes = io.BytesIO() - img.save(imgBytes, "PNG") - imgBytes = imgBytes.getvalue() - - r = requests.put(image_url, data=imgBytes, headers=image_headers) - if r.status_code != 200: - raise gr.Error(r.reason) - return image_id, image_url - -def post_reqest(seed, prompt, width, height, image_num, img=None, mask=None): - data = { - "type": "gen-image", - "parameters": { - "width": width, # output height width - "height": height, # output image height - "prompts": [prompt], - } - } - data["parameters"]["seed"] = int(seed) - if img is not None: - # Upload image - image_id, image_url = upload_image(img) - data["parameters"]["init_image"] = { - "image_id": image_id, - "url": image_url, - "width": img.width, - "height": img.height, - } - if mask is not None: - # Upload mask - extrama = mask.convert("L").getextrema() - if extrama[1] > 0: - mask_id, mask_url = upload_image(mask) - data["parameters"]["mask_image"] = { - "image_id": mask_id, - "url": mask_url, - "width": mask.width, - "height": mask.height, - } - headers = {"token": token} - - # Send create task request - all_task_data = [] - url = url_host+"/api/v1/task/create" - for _ in range(image_num): - r = requests.post(url, json=data, headers=headers) - if r.status_code != 200: - raise gr.Error(r.reason) - create_res = r.json() - if create_res['code'] == 3002: - raise 
gr.Error("Inappropriate prompt detected.") - elif create_res['code'] != 0: - raise gr.Error("Unknown error") - all_task_data.append(create_res["data"]) - - # Get result - url = url_host+"/api/v1/task/status" - images = [] - while True: - if len(all_task_data) <= 0: - return images - for i in range(len(all_task_data)-1, -1, -1): - data = all_task_data[i] - r = requests.post(url, json=data, headers=headers) - if r.status_code != 200: - raise gr.Error(r.reason) - res = r.json() - if res["code"] == 6002: - # Running - continue - if res["code"] == 6005: - raise gr.Error("NSFW image detected.") - elif res["code"] == 0: - # Finished - for img_info in res["data"]["images"]: - img_res = requests.get(img_info["url"]) - images.append(Image.open(io.BytesIO(img_res.content)).convert("RGB")) - del all_task_data[i] - else: - raise gr.Error(f"Error code: {res['code']}") - time.sleep(1) - -def request_images(raw_text, class_draw, style_draw, batch_size, w, h, seed): - if filter_content(class_draw) != "国画": - if filter_content(class_draw) != "通用": - raw_text = raw_text + f",{filter_content(class_draw)}" - - for sty in style_draw: - raw_text = raw_text + f",{filter_content(sty)}" - elif filter_content(class_draw) == "国画": - raw_text = raw_text + ",国画,水墨画,大作,黑白,高清,传统" - print(f"raw text is {raw_text}") - - images = post_reqest(seed, raw_text, w, h, int(batch_size)) - - return images - - -def img2img(prompt, image_and_mask): - if image_and_mask["image"].width <= image_and_mask["image"].height: - width = 512 - height = int((width/image_and_mask["image"].width)*image_and_mask["image"].height) - else: - height = 512 - width = int((height/image_and_mask["image"].height)*image_and_mask["image"].width) - return post_reqest(0, prompt, width, height, 1, image_and_mask["image"], image_and_mask["mask"]) - - -examples = [ - '水墨蝴蝶和牡丹花,国画', - '苍劲有力的墨竹,国画', - '暴风雨中的灯塔', - '机械小松鼠,科学幻想', - '中国水墨山水画,国画', - "Lighthouse in the storm", - "A dog", - "Landscape by 张大千", - "A tiger 长了兔子耳朵", - "A baby bird 铅笔素描", -] - -if __name__ == "__main__": - block = gr.Blocks(css=read_content('style.css')) - - with block: - gr.HTML(read_content("header.html")) - with gr.Tabs(elem_id='tabss') as tabs: - - with gr.TabItem("文生图(Text-to-img)", id='txt2img_tab'): - - with gr.Group(): - with gr.Box(): - with gr.Row().style(mobile_collapse=False, equal_height=True): - text = gr.Textbox( - label="Prompt", - show_label=False, - max_lines=1, - placeholder="Input text(输入文字)", - interactive=True, - ).style( - border=(True, False, True, True), - rounded=(True, False, False, True), - container=False, - ) - - btn = gr.Button("Generate image").style( - margin=False, - rounded=(True, True, True, True), - ) - with gr.Row().style(mobile_collapse=False, equal_height=True): - class_draw = gr.Radio(choices=["通用(general)","国画(traditional Chinese painting)",], value="通用(general)", show_label=True, label='生成类型(type)') - # class_draw = gr.Dropdown(["通用(general)", "国画(traditional Chinese painting)", - # "照片,摄影(picture photography)", "油画(oil painting)", - # "铅笔素描(pencil sketch)", "CG", - # "水彩画(watercolor painting)", "水墨画(ink and wash)", - # "插画(illustrations)", "3D", "图生图(img2img)"], - # label="生成类型(type)", - # show_label=True, - # value="通用(general)") - with gr.Row().style(mobile_collapse=False, equal_height=True): - style_draw = gr.CheckboxGroup(["蒸汽朋克(steampunk)", "电影摄影风格(film photography)", - "概念艺术(concept art)", "Warming lighting", - "Dramatic lighting", "Natural lighting", - "虚幻引擎(unreal engine)", "4k", "8k", - "充满细节(full details)"], - label="画面风格(style)", - 
show_label=True, - ) - with gr.Row().style(mobile_collapse=False, equal_height=True): - # sample_size = gr.Slider(minimum=1, - # maximum=4, - # step=1, - # label="生成数量(number)", - # show_label=True, - # interactive=True, - # ) - sample_size = gr.Radio(choices=["1","2","3","4"], value="1", show_label=True, label='生成数量(number)') - seed = gr.Number(0, label='seed', interactive=True) - with gr.Row().style(mobile_collapse=False, equal_height=True): - w = gr.Slider(512,1024,value=512, step=64, label="width") - h = gr.Slider(512,1024,value=512, step=64, label="height") - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2,2]) - gr.Examples(examples=examples, fn=request_images, inputs=text, outputs=gallery, examples_per_page=100) - with gr.Row().style(mobile_collapse=False, equal_height=True): - img_choices = gr.Dropdown(["图片1(img1)"],label='请选择一张图片发送到图生图',show_label=True,value="图片1(img1)") - with gr.Row().style(mobile_collapse=False, equal_height=True): - output_txt2img_copy_to_input_btn = gr.Button("发送图片到图生图(Sent the image to img2img)").style( - margin=False, - rounded=(True, True, True, True), - ) - - with gr.Row(): - prompt = gr.Markdown("提示(Prompt):", visible=False) - with gr.Row(): - move_prompt_zh = gr.Markdown("请移至图生图部分进行编辑(拉到顶部)", visible=False) - with gr.Row(): - move_prompt_en = gr.Markdown("Please move to the img2img section for editing(Pull to the top)", visible=False) - - - - text.submit(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery) - btn.click(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery) - - sample_size.change( - fn=uifn.change_img_choices, - inputs=[sample_size], - outputs=[img_choices] - ) - - with gr.TabItem("图生图(Img-to-Img)", id="img2img_tab"): - with gr.Row(elem_id="prompt_row"): - img2img_prompt = gr.Textbox(label="Prompt", - elem_id='img2img_prompt_input', - placeholder="神奇的森林,流淌的河流.", - lines=1, - max_lines=1, - value="", - show_label=False).style() - - img2img_btn_mask = gr.Button("Generate", variant="primary", visible=False, - elem_id="img2img_mask_btn") - img2img_btn_editor = gr.Button("Generate", variant="primary", elem_id="img2img_edit_btn") - gr.Markdown('#### 输入图像') - with gr.Row().style(equal_height=False): - #with gr.Column(): - img2img_image_mask = gr.Image( - value=None, - source="upload", - interactive=True, - tool="sketch", - type='pil', - elem_id="img2img_mask", - image_mode="RGBA" - ) - gr.Markdown('#### 编辑后的图片') - with gr.Row(): - output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style( - grid=[4,4,4] ) - with gr.Row(): - gr.Markdown('提示(prompt):') - with gr.Row(): - gr.Markdown('请选择一张图像掩盖掉一部分区域,并输入文本描述') - with gr.Row(): - gr.Markdown('Please select an image to cover up a part of the area and enter a text description.') - gr.Markdown('# 编辑设置',visible=False) - - - output_txt2img_copy_to_input_btn.click( - uifn.copy_img_to_input, - [gallery, img_choices], - [tabs, img2img_image_mask, move_prompt_zh, move_prompt_en, prompt] - ) - - - img2img_func = img2img - img2img_inputs = [img2img_prompt, img2img_image_mask] - img2img_outputs = [output_img2img_gallery] - - img2img_btn_mask.click( - img2img_func, - img2img_inputs, - img2img_outputs - ) - - def img2img_submit_params(): - return (img2img_func, - img2img_inputs, - img2img_outputs) - - img2img_btn_editor.click(*img2img_submit_params()) - - # GENERATE ON ENTER - img2img_prompt.submit(None, None, None, - 
_js=call_JS("clickFirstVisibleButton", - rowId="prompt_row")) - - gr.HTML(read_content("footer.html")) - # gr.Image('./contributors.png') - - block.queue(max_size=512, concurrency_count=256).launch() diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Airmagnet Survey Download Cracked 13.md b/spaces/quidiaMuxgu/Expedit-SAM/Airmagnet Survey Download Cracked 13.md deleted file mode 100644 index 456f9aad51fa9fce6f9d52955de4f8de4ccbf1fe..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Airmagnet Survey Download Cracked 13.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Airmagnet Survey Download Cracked 13


    Download File ○○○ https://geags.com/2uCrnO



    - -Airmagnet Survey Pro Cost Game is one of the most innovative and interactive marketing games of all time. GTA 5's sales have ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Its Plc Professional Edition Activation Key.md b/spaces/quidiaMuxgu/Expedit-SAM/Its Plc Professional Edition Activation Key.md deleted file mode 100644 index 0090098804c50b5a820d54be7aace6627555979a..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Its Plc Professional Edition Activation Key.md +++ /dev/null @@ -1,11 +0,0 @@ -
    -

    ITS PLC Professional Edition: A Powerful Tool for PLC Training and Education

    -

    ITS PLC Professional Edition is a software that simulates industrial systems that can be controlled by programmable logic controllers (PLCs). It is designed for individuals who want to learn how to program and operate PLCs in realistic scenarios. ITS PLC Professional Edition works with all the major PLC brands, such as Allen-Bradley, Siemens, Mitsubishi, Omron, Schneider and many others. It also supports different programming languages, such as ladder logic, function block diagram, instruction list and structured text.

    -

    ITS PLC Professional Edition offers five systems inspired by common industrial plants: Sorting, Batching, Palletizer, Pick & Place and Automatic Warehouse. Each system is a typical industrial application of PLCs that will give you the opportunity to practice real world control tasks. You can connect your own PLC to the simulated system and control it in real-time. You can also monitor the system variables, inputs and outputs using the built-in HMI (Human Machine Interface).

    -

    its plc professional edition activation key


    Download Zip >>> https://geags.com/2uCrvF



    -

    ITS PLC Professional Edition is an essential tool for PLC training and education. It will improve your PLC skills and knowledge with high quality simulations of industrial plants. You can download a free trial version from the official website or buy a license for $299.00 USD. ITS PLC Professional Edition is compatible with Windows XP/Vista/7/8/10/11.

    ITS PLC Professional Edition is not only a simulation software, but also a learning platform. It includes a comprehensive exercise book that guides you through the specification and programming of logic control applications in the ITS PLC training environment. The exercise book contains 72 PLC programs that cover different aspects of industrial automation, such as sequential control, timers, counters, sensors, actuators, logic gates, arithmetic operations and more. You can use the exercise book as a self-study material or as a teaching resource for your classes.

    -

    ITS PLC Professional Edition is also compatible with other software tools that can enhance your PLC training experience. For example, you can use ITS PLC Professional Edition with Factory I/O, a 3D simulation software that allows you to create and control your own industrial systems. You can also use ITS PLC Professional Edition with AUTOMGEN, a software that supports different programming languages and standards, such as GRAFCET (IEC 60848), SFC (IEC 61131-3) and LADDER (IEC 61131-3). By using these software tools together, you can create and test complex and realistic industrial applications.

    -

    ITS PLC Professional Edition is a powerful and versatile tool for PLC training and education. It will help you to master the fundamentals and advanced concepts of PLC programming and operation. It will also prepare you for the challenges and opportunities of the industrial automation field. If you are interested in learning more about ITS PLC Professional Edition, you can visit the official website or watch the video demonstration. You can also contact the developer Real Games Lda for any questions or feedback.

    -

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Malwarebytes Anti-Malware Premium 2.0.1.1004 Keys-P2P Download __EXCLUSIVE__.md b/spaces/quidiaMuxgu/Expedit-SAM/Malwarebytes Anti-Malware Premium 2.0.1.1004 Keys-P2P Download __EXCLUSIVE__.md deleted file mode 100644 index 6d37d9cd04dea0a62e66cfcd3ad7d26999da2d71..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Malwarebytes Anti-Malware Premium 2.0.1.1004 Keys-P2P Download __EXCLUSIVE__.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

    Malwarebytes Anti-Malware Premium offers powerful protection against viruses, ransomware, and other types of malware, and it has a variety of features that can meet the needs of both personal and business users. So if you're looking for an effective way to keep your devices safe from online threats, then Malwarebytes Anti-Malware Premium is the app for you!

    -

    Overall, a Malwarebytes Anti-Malware Premium key offers powerful protection against viruses, ransomware, and other types of malware, and it has a variety of features that can meet the needs of both personal and business users. So if you're looking for an effective way to keep your devices safe from online threats, then Malwarebytes Anti-Malware Premium is the app for you!

    -

    Malwarebytes Anti-Malware Premium 2.0.1.1004 Keys-P2P Download


    Download File > https://geags.com/2uCq65



    -


    -

    "Our main goal is to protect our users from malicious hosts that could either be servers participating in drive-by downloads or even home computers spewing spam," Jérôme Segura, senior security researcher at Malwarebytes, told TF.

    -

    Additionally, Malwarebytes Anti-Malware also offers a business edition that provides extra features for businesses such as device management and protection for multiple devices. This makes it a great choice for businesses that want to protect their employees' devices from online threats.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/osnet.py b/spaces/rachana219/MODT2/trackers/strongsort/deep/models/osnet.py deleted file mode 100644 index b77388f13289f050da2bf2bdebd40ab4fce6f976..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/osnet.py +++ /dev/null @@ -1,598 +0,0 @@ -from __future__ import division, absolute_import -import warnings -import torch -from torch import nn -from torch.nn import functional as F - -__all__ = [ - 'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0' -] - -pretrained_urls = { - 'osnet_x1_0': - 'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY', - 'osnet_x0_75': - 'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq', - 'osnet_x0_5': - 'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i', - 'osnet_x0_25': - 'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs', - 'osnet_ibn_x1_0': - 'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l' -} - - -########## -# Basic layers -########## -class ConvLayer(nn.Module): - """Convolution layer (conv + bn + relu).""" - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - groups=1, - IN=False - ): - super(ConvLayer, self).__init__() - self.conv = nn.Conv2d( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - bias=False, - groups=groups - ) - if IN: - self.bn = nn.InstanceNorm2d(out_channels, affine=True) - else: - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return x - - -class Conv1x1(nn.Module): - """1x1 convolution + bn + relu.""" - - def __init__(self, in_channels, out_channels, stride=1, groups=1): - super(Conv1x1, self).__init__() - self.conv = nn.Conv2d( - in_channels, - out_channels, - 1, - stride=stride, - padding=0, - bias=False, - groups=groups - ) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return x - - -class Conv1x1Linear(nn.Module): - """1x1 convolution + bn (w/o non-linearity).""" - - def __init__(self, in_channels, out_channels, stride=1): - super(Conv1x1Linear, self).__init__() - self.conv = nn.Conv2d( - in_channels, out_channels, 1, stride=stride, padding=0, bias=False - ) - self.bn = nn.BatchNorm2d(out_channels) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - return x - - -class Conv3x3(nn.Module): - """3x3 convolution + bn + relu.""" - - def __init__(self, in_channels, out_channels, stride=1, groups=1): - super(Conv3x3, self).__init__() - self.conv = nn.Conv2d( - in_channels, - out_channels, - 3, - stride=stride, - padding=1, - bias=False, - groups=groups - ) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return x - - -class LightConv3x3(nn.Module): - """Lightweight 3x3 convolution. - - 1x1 (linear) + dw 3x3 (nonlinear). 
- """ - - def __init__(self, in_channels, out_channels): - super(LightConv3x3, self).__init__() - self.conv1 = nn.Conv2d( - in_channels, out_channels, 1, stride=1, padding=0, bias=False - ) - self.conv2 = nn.Conv2d( - out_channels, - out_channels, - 3, - stride=1, - padding=1, - bias=False, - groups=out_channels - ) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - x = self.conv1(x) - x = self.conv2(x) - x = self.bn(x) - x = self.relu(x) - return x - - -########## -# Building blocks for omni-scale feature learning -########## -class ChannelGate(nn.Module): - """A mini-network that generates channel-wise gates conditioned on input tensor.""" - - def __init__( - self, - in_channels, - num_gates=None, - return_gates=False, - gate_activation='sigmoid', - reduction=16, - layer_norm=False - ): - super(ChannelGate, self).__init__() - if num_gates is None: - num_gates = in_channels - self.return_gates = return_gates - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.fc1 = nn.Conv2d( - in_channels, - in_channels // reduction, - kernel_size=1, - bias=True, - padding=0 - ) - self.norm1 = None - if layer_norm: - self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1)) - self.relu = nn.ReLU(inplace=True) - self.fc2 = nn.Conv2d( - in_channels // reduction, - num_gates, - kernel_size=1, - bias=True, - padding=0 - ) - if gate_activation == 'sigmoid': - self.gate_activation = nn.Sigmoid() - elif gate_activation == 'relu': - self.gate_activation = nn.ReLU(inplace=True) - elif gate_activation == 'linear': - self.gate_activation = None - else: - raise RuntimeError( - "Unknown gate activation: {}".format(gate_activation) - ) - - def forward(self, x): - input = x - x = self.global_avgpool(x) - x = self.fc1(x) - if self.norm1 is not None: - x = self.norm1(x) - x = self.relu(x) - x = self.fc2(x) - if self.gate_activation is not None: - x = self.gate_activation(x) - if self.return_gates: - return x - return input * x - - -class OSBlock(nn.Module): - """Omni-scale feature learning block.""" - - def __init__( - self, - in_channels, - out_channels, - IN=False, - bottleneck_reduction=4, - **kwargs - ): - super(OSBlock, self).__init__() - mid_channels = out_channels // bottleneck_reduction - self.conv1 = Conv1x1(in_channels, mid_channels) - self.conv2a = LightConv3x3(mid_channels, mid_channels) - self.conv2b = nn.Sequential( - LightConv3x3(mid_channels, mid_channels), - LightConv3x3(mid_channels, mid_channels), - ) - self.conv2c = nn.Sequential( - LightConv3x3(mid_channels, mid_channels), - LightConv3x3(mid_channels, mid_channels), - LightConv3x3(mid_channels, mid_channels), - ) - self.conv2d = nn.Sequential( - LightConv3x3(mid_channels, mid_channels), - LightConv3x3(mid_channels, mid_channels), - LightConv3x3(mid_channels, mid_channels), - LightConv3x3(mid_channels, mid_channels), - ) - self.gate = ChannelGate(mid_channels) - self.conv3 = Conv1x1Linear(mid_channels, out_channels) - self.downsample = None - if in_channels != out_channels: - self.downsample = Conv1x1Linear(in_channels, out_channels) - self.IN = None - if IN: - self.IN = nn.InstanceNorm2d(out_channels, affine=True) - - def forward(self, x): - identity = x - x1 = self.conv1(x) - x2a = self.conv2a(x1) - x2b = self.conv2b(x1) - x2c = self.conv2c(x1) - x2d = self.conv2d(x1) - x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d) - x3 = self.conv3(x2) - if self.downsample is not None: - identity = self.downsample(identity) - out = x3 + identity - if self.IN is not None: - out = 
self.IN(out) - return F.relu(out) - - -########## -# Network architecture -########## -class OSNet(nn.Module): - """Omni-Scale Network. - - Reference: - - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019. - - Zhou et al. Learning Generalisable Omni-Scale Representations - for Person Re-Identification. TPAMI, 2021. - """ - - def __init__( - self, - num_classes, - blocks, - layers, - channels, - feature_dim=512, - loss='softmax', - IN=False, - **kwargs - ): - super(OSNet, self).__init__() - num_blocks = len(blocks) - assert num_blocks == len(layers) - assert num_blocks == len(channels) - 1 - self.loss = loss - self.feature_dim = feature_dim - - # convolutional backbone - self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN) - self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) - self.conv2 = self._make_layer( - blocks[0], - layers[0], - channels[0], - channels[1], - reduce_spatial_size=True, - IN=IN - ) - self.conv3 = self._make_layer( - blocks[1], - layers[1], - channels[1], - channels[2], - reduce_spatial_size=True - ) - self.conv4 = self._make_layer( - blocks[2], - layers[2], - channels[2], - channels[3], - reduce_spatial_size=False - ) - self.conv5 = Conv1x1(channels[3], channels[3]) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - # fully connected layer - self.fc = self._construct_fc_layer( - self.feature_dim, channels[3], dropout_p=None - ) - # identity classification layer - self.classifier = nn.Linear(self.feature_dim, num_classes) - - self._init_params() - - def _make_layer( - self, - block, - layer, - in_channels, - out_channels, - reduce_spatial_size, - IN=False - ): - layers = [] - - layers.append(block(in_channels, out_channels, IN=IN)) - for i in range(1, layer): - layers.append(block(out_channels, out_channels, IN=IN)) - - if reduce_spatial_size: - layers.append( - nn.Sequential( - Conv1x1(out_channels, out_channels), - nn.AvgPool2d(2, stride=2) - ) - ) - - return nn.Sequential(*layers) - - def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): - if fc_dims is None or fc_dims < 0: - self.feature_dim = input_dim - return None - - if isinstance(fc_dims, int): - fc_dims = [fc_dims] - - layers = [] - for dim in fc_dims: - layers.append(nn.Linear(input_dim, dim)) - layers.append(nn.BatchNorm1d(dim)) - layers.append(nn.ReLU(inplace=True)) - if dropout_p is not None: - layers.append(nn.Dropout(p=dropout_p)) - input_dim = dim - - self.feature_dim = fc_dims[-1] - - return nn.Sequential(*layers) - - def _init_params(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu' - ) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - elif isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def featuremaps(self, x): - x = self.conv1(x) - x = self.maxpool(x) - x = self.conv2(x) - x = self.conv3(x) - x = self.conv4(x) - x = self.conv5(x) - return x - - def forward(self, x, return_featuremaps=False): - x = self.featuremaps(x) - if return_featuremaps: - return x - v = self.global_avgpool(x) - v = v.view(v.size(0), -1) - if self.fc is not None: - v = self.fc(v) - if not self.training: - return v - y = self.classifier(v) - if self.loss == 'softmax': - return y - elif self.loss 
== 'triplet': - return y, v - else: - raise KeyError("Unsupported loss: {}".format(self.loss)) - - -def init_pretrained_weights(model, key=''): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. - """ - import os - import errno - import gdown - from collections import OrderedDict - - def _get_torch_home(): - ENV_TORCH_HOME = 'TORCH_HOME' - ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' - DEFAULT_CACHE_DIR = '~/.cache' - torch_home = os.path.expanduser( - os.getenv( - ENV_TORCH_HOME, - os.path.join( - os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch' - ) - ) - ) - return torch_home - - torch_home = _get_torch_home() - model_dir = os.path.join(torch_home, 'checkpoints') - try: - os.makedirs(model_dir) - except OSError as e: - if e.errno == errno.EEXIST: - # Directory already exists, ignore. - pass - else: - # Unexpected OSError, re-raise. - raise - filename = key + '_imagenet.pth' - cached_file = os.path.join(model_dir, filename) - - if not os.path.exists(cached_file): - gdown.download(pretrained_urls[key], cached_file, quiet=False) - - state_dict = torch.load(cached_file) - model_dict = model.state_dict() - new_state_dict = OrderedDict() - matched_layers, discarded_layers = [], [] - - for k, v in state_dict.items(): - if k.startswith('module.'): - k = k[7:] # discard module. - - if k in model_dict and model_dict[k].size() == v.size(): - new_state_dict[k] = v - matched_layers.append(k) - else: - discarded_layers.append(k) - - model_dict.update(new_state_dict) - model.load_state_dict(model_dict) - - if len(matched_layers) == 0: - warnings.warn( - 'The pretrained weights from "{}" cannot be loaded, ' - 'please check the key names manually ' - '(** ignored and continue **)'.format(cached_file) - ) - else: - print( - 'Successfully loaded imagenet pretrained weights from "{}"'. - format(cached_file) - ) - if len(discarded_layers) > 0: - print( - '** The following layers are discarded ' - 'due to unmatched keys or layer size: {}'. 
- format(discarded_layers) - ) - - -########## -# Instantiation -########## -def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs): - # standard size (width x1.0) - model = OSNet( - num_classes, - blocks=[OSBlock, OSBlock, OSBlock], - layers=[2, 2, 2], - channels=[64, 256, 384, 512], - loss=loss, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_x1_0') - return model - - -def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs): - # medium size (width x0.75) - model = OSNet( - num_classes, - blocks=[OSBlock, OSBlock, OSBlock], - layers=[2, 2, 2], - channels=[48, 192, 288, 384], - loss=loss, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_x0_75') - return model - - -def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs): - # tiny size (width x0.5) - model = OSNet( - num_classes, - blocks=[OSBlock, OSBlock, OSBlock], - layers=[2, 2, 2], - channels=[32, 128, 192, 256], - loss=loss, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_x0_5') - return model - - -def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs): - # very tiny size (width x0.25) - model = OSNet( - num_classes, - blocks=[OSBlock, OSBlock, OSBlock], - layers=[2, 2, 2], - channels=[16, 64, 96, 128], - loss=loss, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_x0_25') - return model - - -def osnet_ibn_x1_0( - num_classes=1000, pretrained=True, loss='softmax', **kwargs -): - # standard size (width x1.0) + IBN layer - # Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018. - model = OSNet( - num_classes, - blocks=[OSBlock, OSBlock, OSBlock], - layers=[2, 2, 2], - channels=[64, 256, 384, 512], - loss=loss, - IN=True, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_ibn_x1_0') - return model diff --git a/spaces/raedeXanto/academic-chatgpt-beta/EX4-to-MQ4 v4.0.427.rar The Best Decompiler for MetaTrader 4 Build 509 and Earlier.md b/spaces/raedeXanto/academic-chatgpt-beta/EX4-to-MQ4 v4.0.427.rar The Best Decompiler for MetaTrader 4 Build 509 and Earlier.md deleted file mode 100644 index 4f58cda39972e2a9c7198e39583f7ae0e841f44f..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/EX4-to-MQ4 v4.0.427.rar The Best Decompiler for MetaTrader 4 Build 509 and Earlier.md +++ /dev/null @@ -1,115 +0,0 @@ - -

    What is ex4-to-mq4 v4.0.427.rar and why do you need it?

    -

    If you are a Forex trader who uses MetaTrader as your trading platform, you may have encountered EX4 files that contain trading robots, technical indicators or scripts. These files are executable programs that run on MetaTrader and perform various trading tasks. However, sometimes you may want to access the source code of these files, which is usually written in the MQL language and saved as MQ4 files. This is where ex4-to-mq4 v4.0.427.rar comes in handy.

    -




    -

    What is ex4-to-mq4 v4.0.427.rar?

    -

    Ex4-to-mq4 v4.0.427.rar is a software tool that can decompile any EX4 file and save its source code as an MQ4 file. A decompiler is a reverse engineering tool that can recover the original code from an executable file.

    -

    What is an EX4 file?

    -

    An EX4 file is a compiled version of an MQ4 file that can be executed by MetaTrader. MetaTrader is a popular trading platform that allows traders to trade on various financial markets, such as Forex, stocks, commodities or cryptocurrencies. MetaTrader has a built-in programming language called MQL, which stands for MetaQuotes Language. MQL allows traders to create their own trading robots, technical indicators or scripts that can automate various trading tasks or provide useful information.

    -

    An MQ4 file is a source code file that contains the MQL code of a trading robot, technical indicator or script. An MQ4 file can be edited and modified using MetaEditor, which is an integrated development environment (IDE) for MQL programming.
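
To make the distinction concrete, here is a minimal, hypothetical example of what the source code inside an MQ4 file might look like: a trivial old-style MQL4 script that prints the current quotes of the chart symbol. The file name and contents are purely illustrative and are not taken from any real product; compiling such a file in MetaEditor is what produces the corresponding EX4.

//+------------------------------------------------------------------+
//| Example.mq4 - illustrative only, not part of any real product    |
//| A minimal old-style MQL4 script: start() runs once when the      |
//| script is attached to a chart.                                    |
//+------------------------------------------------------------------+
int start()
  {
   // Print the chart symbol and its current Bid/Ask quotes to the Experts log
   Print("Symbol: ", Symbol(), "  Bid: ", Bid, "  Ask: ", Ask);
   return(0);
  }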

    -


    -

    To run an MQ4 file on MetaTrader, it needs to be compiled into an EX4 file, which is a binary executable file that can be loaded and executed by MetaTrader. The compilation process converts the human-readable MQL code into machine-readable binary code that can be understood by MetaTrader.

    -

    What is a decompiler?

    -

    A decompiler is a software tool that can perform the opposite process of compilation, i.e., it can recover the source code from an executable file. A decompiler can be useful when the source code of a program is lost or unavailable, but the executable file is still functional.

    -

    Ex4-to-mq4 v4.0.427.rar is a decompiler that can decompile any EX4 file and save its source code as an MQ4 file. The decompilation process is done automatically by the software, without requiring any user input.

    -

    Why do you need ex4-to-mq4 v4.0.427.rar?

    -

    There are several reasons why you may want to decompile an EX4 file and access its source code as an MQ4 file.

    -

    To understand the trading strategy of an EX4 file

    -

    If you have downloaded or purchased an EX4 file that contains a trading robot or a technical indicator, you may want to know how it works and what trading strategy it uses. By decompiling the EX4 file and viewing its source code as an MQ4 file, you can gain insight into the logic and algorithm behind the program.

    -

    This can help you to evaluate the performance and reliability of the program, as well as to learn from its design and implementation.

    -

    To modify or improve an EX4 file

    -

    If you have an EX4 file that contains a trading robot or a technical indicator that you like, but you want to make some changes or improvements to it, you need to access its source code as an MQ4 file.

    -

    By decompiling the EX4 file and editing its source code as an MQ4 file, you can customize and optimize the program according to your preferences and needs.

    -

    For example, you can change some parameters or settings of the program, add some features or functions to it, fix some bugs or errors in it, or integrate it with other programs.

    -

To protect your intellectual property

    -

If you are the developer of a trading robot or indicator and have lost the original MQ4 source code but still have the compiled EX4 file, decompiling it lets you recover your own work so that you can keep maintaining and improving it. Having the source code on hand also makes it easier to document how your program works and to prove authorship if a dispute ever arises.

    -

    How to use ex4-to-mq4 v4.0.427.rar?

    -

    Using ex4-to-mq4 v4.0.427.rar is very simple and straightforward. Here are the steps you need to follow:

    -

    Download and install ex4-to-mq4 v4.0.427.rar

    -

You can download ex4-to-mq4 v4.0.427.rar from various online sources. The file is a compressed archive that contains the executable file of the decompiler and a few supporting files. You need to extract the archive to a folder on your computer using an archiving tool such as WinRAR or 7-Zip.

    -

    After extracting the archive, you need to run the executable file of the decompiler, which is called EX4-TO-MQ4.exe. You may need to grant permission to run the file if your antivirus or firewall software warns you about it.

    -

    Select an EX4 file to decompile

    -

    Once you run the decompiler, you will see a simple user interface that allows you to select an EX4 file to decompile. You can browse your computer folders and locate the EX4 file you want to decompile, or you can drag and drop the file into the decompiler window.

    -

    The decompiler will automatically detect the EX4 file and display some information about it, such as its name, size, date and build number.

    -

    Save the MQ4 file and open it in MetaEditor

    -

    After selecting the EX4 file to decompile, you need to click on the Decompile button at the bottom of the decompiler window. The decompiler will start working and show you a progress bar that indicates how much of the decompilation process is completed.

    -

    When the decompilation is finished, the decompiler will save the MQ4 file in the same folder as the EX4 file and show you a message that confirms the successful decompilation.

    -

    You can then open the MQ4 file in MetaEditor and view its source code. You can also edit or modify the code as you wish.

    -

    What are the limitations of ex4-to-mq4 v4.0.427.rar?

    -

    While ex4-to-mq4 v4.0.427.rar is a useful and powerful tool that can decompile any EX4 file and save its source code as an MQ4 file, it also has some limitations that you need to be aware of.

    -

    It only works for EX4 files compiled by MetaTrader build no higher than 600

    -

The most important limitation of ex4-to-mq4 v4.0.427.rar is that it only works for EX4 files that were compiled by MetaTrader build no higher than 600. In practice, this means the EX4 file must have been created before early 2014, when build 600 introduced a new MQL4 compiler and stronger protection for compiled files.

    -

    It may not recover all the original source code features

    -

    Another limitation of ex4-to-mq4 v4.0.427.rar is that it may not recover all the original features of the source code, such as comments, variable names, formatting or indentation. This is because some of these features are lost or changed during the compilation process and cannot be restored by the decompiler.

    -

    Therefore, the MQ4 file that you get from the decompiler may not look exactly like the original MQ4 file that was used to create the EX4 file. However, the decompiler will try to preserve the functionality and logic of the program as much as possible.

    -

    It may violate the developer's rights and terms of use

    -

    The final limitation of ex4-to-mq4 v4.0.427.rar is that it may violate the rights and terms of use of the developer who created the EX4 file. Some developers may not want their source code to be exposed or modified by others, and they may protect their EX4 files with encryption or obfuscation techniques.

    -

    By decompiling their EX4 files and accessing their source code, you may be infringing their intellectual property rights and breaking their terms of use. This may result in legal consequences or ethical issues.

    -

    Therefore, before using ex4-to-mq4 v4.0.427.rar to decompile an EX4 file, you should always check the developer's website or contact them to ask for their permission and consent.

    -

    Conclusion

    -

    In conclusion, ex4-to-mq4 v4.0.427.rar is a software tool that can decompile any EX4 file and save its source code as an MQ4 file. This can be useful for various purposes, such as understanding the trading strategy of an EX4 file, modifying or improving an EX4 file, or protecting your intellectual property.

    -

    However, ex4-to-mq4 v4.0.427.rar also has some limitations that you need to be aware of, such as working only for EX4 files compiled by MetaTrader build no higher than 600, not recovering all the original source code features, and possibly violating the developer's rights and terms of use.

    -

    Therefore, before using ex4-to-mq4 v4.0.427.rar to decompile an EX4 file, you should always do your research and ask for permission from the developer.

    -

    FAQs

    -

    What is the difference between EX4 and MQ4 files?

    -

    An EX4 file is a compiled version of an MQ4 file that can be executed by MetaTrader. An MQ4 file is a source code file that contains the MQL code of a trading robot, technical indicator or script.

    -

    What is ex4-to-mq4 v4.0.427.rar?

    -

    Ex4-to-mq4 v4.0.427.rar is a software tool that can decompile any EX4 file and save its source code as an MQ4 file.

    -

    Why do I need ex4-to-mq4 v4.0.427.rar?

    -

    You may need ex4-to-mq4 v4.0.427.rar to understand the trading strategy of an EX4 file, to modify or improve an EX4 file, or to protect your intellectual property.

    -

    How do I use ex4-to-mq4 v4.0.427.rar?

    -

    You need to download and install ex4-to-mq4 v4.0.427.rar, select an EX4 file to decompile, and save the MQ4 file and open it in MetaEditor.

    -

    What are the limitations of ex4-to-mq4 v4.0.427.rar?

    -

    Ex4-to-mq4 v4.0.427.rar only works for EX4 files compiled by MetaTrader build no higher than 600, it may not recover all the original source code features, and it may violate the developer's rights and terms of use.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/preprocess.py b/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/preprocess.py deleted file mode 100644 index fe5ab25ef7cb4adeb76cad11962f179d6a38edcc..0000000000000000000000000000000000000000 --- a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/preprocess.py +++ /dev/null @@ -1,285 +0,0 @@ -from multiprocess.pool import ThreadPool -from speaker_encoder.params_data import * -from speaker_encoder.config import librispeech_datasets, anglophone_nationalites -from datetime import datetime -from speaker_encoder import audio -from pathlib import Path -from tqdm import tqdm -import numpy as np - - -class DatasetLog: - """ - Registers metadata about the dataset in a text file. - """ - def __init__(self, root, name): - self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w") - self.sample_data = dict() - - start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) - self.write_line("Creating dataset %s on %s" % (name, start_time)) - self.write_line("-----") - self._log_params() - - def _log_params(self): - from speaker_encoder import params_data - self.write_line("Parameter values:") - for param_name in (p for p in dir(params_data) if not p.startswith("__")): - value = getattr(params_data, param_name) - self.write_line("\t%s: %s" % (param_name, value)) - self.write_line("-----") - - def write_line(self, line): - self.text_file.write("%s\n" % line) - - def add_sample(self, **kwargs): - for param_name, value in kwargs.items(): - if not param_name in self.sample_data: - self.sample_data[param_name] = [] - self.sample_data[param_name].append(value) - - def finalize(self): - self.write_line("Statistics:") - for param_name, values in self.sample_data.items(): - self.write_line("\t%s:" % param_name) - self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values))) - self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values))) - self.write_line("-----") - end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) - self.write_line("Finished on %s" % end_time) - self.text_file.close() - - -def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> (Path, DatasetLog): - dataset_root = datasets_root.joinpath(dataset_name) - if not dataset_root.exists(): - print("Couldn\'t find %s, skipping this dataset." % dataset_root) - return None, None - return dataset_root, DatasetLog(out_dir, dataset_name) - - -def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, extension, - skip_existing, logger): - print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs))) - - # Function to preprocess utterances for one speaker - def preprocess_speaker(speaker_dir: Path): - # Give a name to the speaker that includes its dataset - speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) - - # Create an output directory with that name, as well as a txt file containing a - # reference to each source file. - speaker_out_dir = out_dir.joinpath(speaker_name) - speaker_out_dir.mkdir(exist_ok=True) - sources_fpath = speaker_out_dir.joinpath("_sources.txt") - - # There's a possibility that the preprocessing was interrupted earlier, check if - # there already is a sources file. 
- if sources_fpath.exists(): - try: - with sources_fpath.open("r") as sources_file: - existing_fnames = {line.split(",")[0] for line in sources_file} - except: - existing_fnames = {} - else: - existing_fnames = {} - - # Gather all audio files for that speaker recursively - sources_file = sources_fpath.open("a" if skip_existing else "w") - for in_fpath in speaker_dir.glob("**/*.%s" % extension): - # Check if the target output file already exists - out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) - out_fname = out_fname.replace(".%s" % extension, ".npy") - if skip_existing and out_fname in existing_fnames: - continue - - # Load and preprocess the waveform - wav = audio.preprocess_wav(in_fpath) - if len(wav) == 0: - continue - - # Create the mel spectrogram, discard those that are too short - frames = audio.wav_to_mel_spectrogram(wav) - if len(frames) < partials_n_frames: - continue - - out_fpath = speaker_out_dir.joinpath(out_fname) - np.save(out_fpath, frames) - logger.add_sample(duration=len(wav) / sampling_rate) - sources_file.write("%s,%s\n" % (out_fname, in_fpath)) - - sources_file.close() - - # Process the utterances for each speaker - with ThreadPool(8) as pool: - list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs), - unit="speakers")) - logger.finalize() - print("Done preprocessing %s.\n" % dataset_name) - - -# Function to preprocess utterances for one speaker -def __preprocess_speaker(speaker_dir: Path, datasets_root: Path, out_dir: Path, extension: str, skip_existing: bool): - # Give a name to the speaker that includes its dataset - speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) - - # Create an output directory with that name, as well as a txt file containing a - # reference to each source file. - speaker_out_dir = out_dir.joinpath(speaker_name) - speaker_out_dir.mkdir(exist_ok=True) - sources_fpath = speaker_out_dir.joinpath("_sources.txt") - - # There's a possibility that the preprocessing was interrupted earlier, check if - # there already is a sources file. 
- # if sources_fpath.exists(): - # try: - # with sources_fpath.open("r") as sources_file: - # existing_fnames = {line.split(",")[0] for line in sources_file} - # except: - # existing_fnames = {} - # else: - # existing_fnames = {} - existing_fnames = {} - # Gather all audio files for that speaker recursively - sources_file = sources_fpath.open("a" if skip_existing else "w") - - for in_fpath in speaker_dir.glob("**/*.%s" % extension): - # Check if the target output file already exists - out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) - out_fname = out_fname.replace(".%s" % extension, ".npy") - if skip_existing and out_fname in existing_fnames: - continue - - # Load and preprocess the waveform - wav = audio.preprocess_wav(in_fpath) - if len(wav) == 0: - continue - - # Create the mel spectrogram, discard those that are too short - frames = audio.wav_to_mel_spectrogram(wav) - if len(frames) < partials_n_frames: - continue - - out_fpath = speaker_out_dir.joinpath(out_fname) - np.save(out_fpath, frames) - # logger.add_sample(duration=len(wav) / sampling_rate) - sources_file.write("%s,%s\n" % (out_fname, in_fpath)) - - sources_file.close() - return len(wav) - -def _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, extension, - skip_existing, logger): - # from multiprocessing import Pool, cpu_count - from pathos.multiprocessing import ProcessingPool as Pool - # Function to preprocess utterances for one speaker - def __preprocess_speaker(speaker_dir: Path): - # Give a name to the speaker that includes its dataset - speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) - - # Create an output directory with that name, as well as a txt file containing a - # reference to each source file. - speaker_out_dir = out_dir.joinpath(speaker_name) - speaker_out_dir.mkdir(exist_ok=True) - sources_fpath = speaker_out_dir.joinpath("_sources.txt") - - existing_fnames = {} - # Gather all audio files for that speaker recursively - sources_file = sources_fpath.open("a" if skip_existing else "w") - wav_lens = [] - for in_fpath in speaker_dir.glob("**/*.%s" % extension): - # Check if the target output file already exists - out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) - out_fname = out_fname.replace(".%s" % extension, ".npy") - if skip_existing and out_fname in existing_fnames: - continue - - # Load and preprocess the waveform - wav = audio.preprocess_wav(in_fpath) - if len(wav) == 0: - continue - - # Create the mel spectrogram, discard those that are too short - frames = audio.wav_to_mel_spectrogram(wav) - if len(frames) < partials_n_frames: - continue - - out_fpath = speaker_out_dir.joinpath(out_fname) - np.save(out_fpath, frames) - # logger.add_sample(duration=len(wav) / sampling_rate) - sources_file.write("%s,%s\n" % (out_fname, in_fpath)) - wav_lens.append(len(wav)) - sources_file.close() - return wav_lens - - print("%s: Preprocessing data for %d speakers." 
% (dataset_name, len(speaker_dirs))) - # Process the utterances for each speaker - # with ThreadPool(8) as pool: - # list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs), - # unit="speakers")) - pool = Pool(processes=20) - for i, wav_lens in enumerate(pool.map(__preprocess_speaker, speaker_dirs), 1): - for wav_len in wav_lens: - logger.add_sample(duration=wav_len / sampling_rate) - print(f'{i}/{len(speaker_dirs)} \r') - - logger.finalize() - print("Done preprocessing %s.\n" % dataset_name) - - -def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False): - for dataset_name in librispeech_datasets["train"]["other"]: - # Initialize the preprocessing - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Preprocess all speakers - speaker_dirs = list(dataset_root.glob("*")) - _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "flac", - skip_existing, logger) - - -def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False): - # Initialize the preprocessing - dataset_name = "VoxCeleb1" - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Get the contents of the meta file - with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile: - metadata = [line.split("\t") for line in metafile][1:] - - # Select the ID and the nationality, filter out non-anglophone speakers - nationalities = {line[0]: line[3] for line in metadata} - # keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if - # nationality.lower() in anglophone_nationalites] - keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items()] - print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." % - (len(keep_speaker_ids), len(nationalities))) - - # Get the speaker directories for anglophone speakers only - speaker_dirs = dataset_root.joinpath("wav").glob("*") - speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if - speaker_dir.name in keep_speaker_ids] - print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." 
% - (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs))) - - # Preprocess all speakers - _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "wav", - skip_existing, logger) - - -def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False): - # Initialize the preprocessing - dataset_name = "VoxCeleb2" - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Get the speaker directories - # Preprocess all speakers - speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*")) - _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, "m4a", - skip_existing, logger) diff --git a/spaces/raphaelmerx/MMS-transcription/README.md b/spaces/raphaelmerx/MMS-transcription/README.md deleted file mode 100644 index 63910f84e4a8e8206ef6a037bf91e3db6534cd90..0000000000000000000000000000000000000000 --- a/spaces/raphaelmerx/MMS-transcription/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Transcription using MMS -emoji: 🎤 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.37.0 -app_file: app.py -pinned: true -duplicated_from: ayymen/MMS-ASR ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition CC 2018 11.0.2.2 (x64) Crack Keygen [UPD].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition CC 2018 11.0.2.2 (x64) Crack Keygen [UPD].md deleted file mode 100644 index 26e4ca42184176234d229798cf130d2b7c6edbfa..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition CC 2018 11.0.2.2 (x64) Crack Keygen [UPD].md +++ /dev/null @@ -1,148 +0,0 @@ -
    -

    How to Download and Use Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen

    - -

If you are looking for professional audio editing software that can handle multitrack, waveform, and spectral display editing, you might want to check out Adobe Audition CC 2018 11.0.2.2 (x64). This powerful audio workstation is designed to accelerate video production workflows and audio finishing, and to deliver a polished mix with pristine sound.

    - -

However, Adobe Audition CC 2018 11.0.2.2 (x64) is not free software, and you need to pay a monthly subscription fee to use it. If you want to save some money and still enjoy its features, you can try using a crack keygen that can generate a valid serial number for you.

    -




    - -

In this article, we will show you how to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen, and what the benefits and risks of doing so are.

    - -

    What is Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen?

    - -

    A crack keygen is a software tool that can generate a serial number or a license key for another software, such as Adobe Audition CC 2018 11.0.2.2 (x64). By using a crack keygen, you can bypass the activation process of the original software and use it without paying for it.

    - -

    There are many websites that offer crack keygens for various software, but not all of them are reliable or safe. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Therefore, you need to be careful when choosing a source for downloading a crack keygen.

    - -

    How to Download and Use Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen?

    - -

    Here are the steps to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen:

    - -
1. Download the original software from the official website of Adobe or from a trusted third-party source.
2. Install the software on your computer, but do not launch it yet.
3. Download the crack keygen from a reliable website that has positive reviews and feedback from other users.
4. Extract the crack keygen file using a file archiver program such as WinRAR or 7-Zip.
5. Run the crack keygen as an administrator and click on the Generate button.
6. Copy the generated serial number and paste it into the activation window of Adobe Audition CC 2018 11.0.2.2 (x64).
7. Click on the Activate button and wait for the confirmation message.
8. Launch the software and enjoy its features.

    -

    What are the Benefits and Risks of Using Adobe Audition CC 2018 11.0.2.2 (x64) Crack keygen?

    - -

    Using Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen has some benefits and risks that you need to be aware of before deciding to use it.

    -

    - -

    The main benefit of using a crack keygen is that you can save money and use the software without paying for it. You can also access all the features and updates of the software without any limitations or restrictions.

    - -

    The main risk of using a crack keygen is that you may violate the terms and conditions of the original software developer and face legal consequences for piracy or copyright infringement. You may also expose your computer to viruses, malware, or spyware that can damage your system or compromise your security and privacy.

    - -

    Therefore, you need to weigh the pros and cons of using a crack keygen and decide whether it is worth it or not.

    - -

    Conclusion

    - -

    Adobe Audition CC 2018 11.0.2.2 (x64) is a professional audio editing software that can help you create, mix, and edit audio content with ease and efficiency.

    - -

    If you want to use this software without paying for it, you can try using a crack keygen that can generate a valid serial number for you.

    - -

    However, using a crack keygen has some benefits and risks that you need to consider before doing so.

    - -

We hope this article has helped you understand how to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen, and what the advantages and disadvantages of doing so are.

    -

    How to Use Adobe Audition CC 2018 11.0.2.2 (x64) for Audio Editing?

    - -

    Now that you have downloaded and activated Adobe Audition CC 2018 11.0.2.2 (x64) using a crack keygen, you may wonder how to use it for your audio editing projects.

    - -

    In this section, we will give you a brief overview of the main features and functions of Adobe Audition CC 2018 11.0.2.2 (x64) and how to use them.

    - -

    Essential Sound Panel

    - -

    The Essential Sound panel is a new feature in Adobe Audition CC 2018 that allows you to achieve professional-quality audio with simple and intuitive controls.

    - -

    The Essential Sound panel lets you assign audio types to your clips, such as Dialogue, Music, Sound Effects, or Ambience, and apply presets or custom adjustments to them.

    - -

    You can also use the Essential Sound panel to duck music behind dialogue, reduce noise, enhance speech clarity, add reverb, and more.

    - -

    To use the Essential Sound panel, follow these steps:

    - -
1. Select one or more clips in the Multitrack Editor or the Waveform Editor.
2. Click on the Essential Sound tab in the right panel.
3. Choose an audio type from the drop-down menu at the top of the panel.
4. Adjust the sliders and options according to your needs and preferences.
5. Click on the Apply button to apply the changes to your clips.

    -

    Multitrack Editor

    - -

    The Multitrack Editor is where you can record, edit, and mix multiple audio tracks in a non-destructive way.

    - -

    The Multitrack Editor lets you arrange your clips on separate tracks, add effects and transitions, adjust volume and pan levels, automate keyframes, and more.

    - -

    You can also use the Multitrack Editor to sync your audio with video, export your mixdowns, or send your tracks to Adobe Premiere Pro CC for further editing.

    - -

    To use the Multitrack Editor, follow these steps:

    - -
1. Click on the File menu and choose New > Multitrack Session.
2. Enter a name for your session and choose a location to save it.
3. Select a template or customize your own settings for sample rate, bit depth, channelization, etc.
4. Click on the OK button to create your session.
5. Import your audio files by dragging and dropping them from the Media Browser or the Files panel to the desired tracks in the Timeline.
6. Edit your clips by trimming, splitting, moving, fading, crossfading, etc.
7. Add effects to your clips or tracks by clicking on the FX button in the track header or clip header and choosing from the available effects.
8. Mix your tracks by adjusting the volume and pan faders in the Mixer panel or in the track headers.
9. Add automation to your tracks by clicking on the Show/Hide Automation button in the track header and choosing a parameter to automate.
10. Export your mixdown by clicking on the File menu and choosing Export > Multitrack Mixdown > Entire Session or Selected Clips.

    -

    Waveform Editor

    - -

    The Waveform Editor is where you can edit individual audio files in a destructive way.

    - -

    The Waveform Editor lets you view and modify your audio waveform with various tools and commands, such as cut, copy, paste, delete, silence, amplify, normalize, etc.

    - -

    You can also use the Waveform Editor to apply effects and processes to your audio file, such as noise reduction, compression, equalization, pitch correction, etc.

    - -

    To use the Waveform Editor, follow these steps:

    - -
1. Import your audio file by clicking on the File menu and choosing Open or Import File.
2. Select a part of your audio file by clicking and dragging on the waveform or using the Time Selection tool.
3. Edit your selection by using the Edit menu commands or keyboard shortcuts.
4. Add effects to your selection by clicking on the Effects menu and choosing from the available effects.
5. Save your changes by clicking on the File menu and choosing Save or Save As.

    -

    Tips and Tricks for Using Adobe Audition CC 2018 11.0.2.2 (x64)

    - -

    To help you get the most out of Adobe Audition CC 2018 11.0.2.2 (x64), here are some tips and tricks that you can use:

    - -
• Use keyboard shortcuts to speed up your workflow. You can view and customize keyboard shortcuts by clicking on the Edit menu and choosing Keyboard Shortcuts.
• Use workspaces to organize your panels according to your tasks. You can switch between different workspaces by clicking on the Window menu and choosing Workspace. You can also create your own custom workspaces by arranging and docking panels as you like.
• Use markers and metadata to annotate and organize your audio files. You can add markers by pressing M while playing back or recording audio. You can edit markers by double-clicking on them in the Markers panel. You can add metadata by clicking on the File menu and choosing File Info.
• Use batch processing to apply effects or processes to multiple files at once. You can access batch processing by clicking on the File menu and choosing Batch Process. You can also create custom scripts for batch processing by clicking on the Effects menu and choosing Favorites > Edit Favorites.
• Use spectral frequency display to view and edit your audio in terms of frequency instead of amplitude. You can switch between waveform display and spectral frequency display by clicking on the View menu and choosing Waveform/Spectral Frequency Display. You can also use spectral editing tools such as Marquee Selection tool or Lasso Selection tool to select specific frequency ranges for editing.

    -

    Conclusion

    - -

In this article, we have shown you how to download and use Adobe Audition CC 2018 11.0.2.2 (x64) crack keygen, what its main features and functions are, and how to use them for audio editing projects.

    - -

    We hope this article has been helpful for you and that you have learned something new about Adobe Audition CC 2018 11.0.2.2 (x64).

    - -

    If you have any questions or feedback about this article or Adobe Audition CC 2018 11.0.2.2 (x64), feel free to leave a comment below or contact us via email.

    - -

    Thank you for reading this article and happy audio editing!

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition Cs6 Crack [BEST] Rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition Cs6 Crack [BEST] Rar.md deleted file mode 100644 index 2b20d391c287e45082937b5089f9d829a5ac041d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Audition Cs6 Crack [BEST] Rar.md +++ /dev/null @@ -1,34 +0,0 @@ -

    Adobe Audition Cs6 Crack Rar


    Downloadhttps://urlgoal.com/2uCKmI



    - -... adobe audition cc, adobe audition crack, adobe audition 2021, adobe audition cs6, adobe audition autotune, adobe audition free download for windows 10, ... Adobe Audition CC 2018 download .... -Adobe Audition CC 2018 is a professional program for working with audio content. ... -Download Adobe Audition CC 2018 (version 10.0.0.104) - Russian version, for free. -Download Adobe Audition CC 2018 (v10.1.3) (Russian) - torrent. ... -Adobe Audition CC 2018 v10.1.3 (RUS) - Russian version with the possibility of download ... -Download Adobe Audition CC 2017 (v11.1.1.1).torrent; Download Adobe Premiere Pro CC 2018 v12.0.1.55 x64-x32 (RUS).torrent. - Download torrent Adobe Premiere Pro CC 2018 12.1.3.58 RePack [x64-x32] [2018] ... -Adobe Premiere Pro CC 2017 v11.0.1 (x32/x64) RePack by D!akov. -Download free Adobe Photoshop CS5 Extended 12.0.4 Rus (x32/x64) ... -Adobe Premiere Pro CC 2017 v11.0.1 (x32/x64) RePack by D!akov ... -Adobe Premiere Pro CC 2017 (v11.1) (x32/x64/RUS) - Free Download. -Adobe Photoshop Lightroom Classic v8.4 Final [2018/Multi/Rus] Portable. -Adobe Premiere Pro CC 2018 v12.1.3.58 RePack [x64-x32] [2018 ... - Adobe Premiere Pro CC 2018 v12.1.3.58 RePack [x86-x64] [2018 ... -Adobe Photoshop Lightroom 5 v5.2.0 Final [2013, Ml/Rus] [by ... -Adobe Premiere Pro CC 2018 v12.1.3.58 RePack [x86-x64] [2018 ... -Adobe Premiere Pro CC 2018 12.1.3.58 RePack by KpoJIuK -Adobe Premiere Pro CC 2018 12.1.3.58 RePack (x86-x64) [2018 ... -Adobe Premiere Pro CC 2018 v12.1.3.58 RePack by KpoJIuK - Year: 2018 Software Version: CC 2018 12.1.3.58 Official website: adobe.ru Builder: by KpoJIuK Interface language: Russian/English Treatment: not required (the installer is already cured) -- Intel® or AMD multi-core processor - 4 GB of RAM (8 GB recommended) - 2 GB of available hard-disk space - 1024x768 display - Internet connection - Microsoft® Windows® 7 with Service Pack 1, Windows 10, or Windows 8.x - 1 GB of available hard-disk space - Description: -Dead Island is a first-person view game that takes you on an adventure on an island teeming with zombies. -Dead Island combines first-person action and survival horror elements. -The game's developers also talk about a kind of parody of famous horror movies involving the dead and buried alive, as well as epidemic movies that take over the world. -"You'll encounter zombies in a very different way here than in many other games of this type," the game's creators tell us. - - If, for example, in the movie "28 Days Later" zombies are rather slow and slow in movement, in the game "28 Days Later" they are very fast, aggressive and nimble dead, capable of killing, jumping and somersaulting because of their impressive size. -And here we also introduce, for the first time in the history of computer games, zombies who carry their own weapons, and their bones can turn into stabbing and cutting weapons. -So you have to watch out for them, especially if you're alone. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Encarta Premium 2008 Student ITA.nrg[volpebianca] .rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Encarta Premium 2008 Student ITA.nrg[volpebianca] .rar.md deleted file mode 100644 index 328154b6584181592556b83edbfc02bc9f6c8db6..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Encarta Premium 2008 Student ITA.nrg[volpebianca] .rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Encarta Premium 2008 Student ITA.nrg[volpebianca] .rar


    Download File ☆☆☆ https://urlgoal.com/2uCKtx



    -
    -
    -
    -

    diff --git a/spaces/reilnuud/polite/README.md b/spaces/reilnuud/polite/README.md deleted file mode 100644 index 0309caba4a9171f77219f37ffa7b5124a96e46f5..0000000000000000000000000000000000000000 --- a/spaces/reilnuud/polite/README.md +++ /dev/null @@ -1,17 +0,0 @@ - ---- -tags: [gradio-theme] -title: polite -colorFrom: orange -colorTo: purple -sdk: gradio -sdk_version: 3.28.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- -# polite -## Description -Add a description of this theme here! -## Contributions -Thanks to [@reilnuud](https://huggingface.co/reilnuud) for adding this gradio theme! diff --git a/spaces/robin0307/MMOCR/configs/_base_/det_datasets/synthtext.py b/spaces/robin0307/MMOCR/configs/_base_/det_datasets/synthtext.py deleted file mode 100644 index fb9a44b3422dae5a9788d39b0901335dfc6076a9..0000000000000000000000000000000000000000 --- a/spaces/robin0307/MMOCR/configs/_base_/det_datasets/synthtext.py +++ /dev/null @@ -1,18 +0,0 @@ -dataset_type = 'TextDetDataset' -data_root = 'data/synthtext' - -train = dict( - type=dataset_type, - ann_file=f'{data_root}/instances_training.lmdb', - loader=dict( - type='AnnFileLoader', - repeat=1, - file_format='lmdb', - parser=dict( - type='LineJsonParser', - keys=['file_name', 'height', 'width', 'annotations'])), - img_prefix=f'{data_root}/imgs', - pipeline=None) - -train_list = [train] -test_list = [train] diff --git a/spaces/rorallitri/biomedical-language-models/Outkast-ATLiens-Full-Album-Zip-LINK.md b/spaces/rorallitri/biomedical-language-models/Outkast-ATLiens-Full-Album-Zip-LINK.md deleted file mode 100644 index 0e514be43451fdc38551958560abb09a9d4ddd98..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/Outkast-ATLiens-Full-Album-Zip-LINK.md +++ /dev/null @@ -1,62 +0,0 @@ -## Outkast, ATLiens Full Album Zip - - - - - - - - - - - -**CLICK HERE ->>->>->> [https://vittuv.com/2txUfG](https://vittuv.com/2txUfG)** - - - - - - - - - - - - - -# Outkast's ATLiens: A Classic Album of Southern Hip Hop - - - -Outkast is one of the most influential and successful hip hop groups of all time. The duo of Andre 3000 and Big Boi emerged from Atlanta, Georgia in the early 1990s and brought a fresh and innovative sound to the genre. Their second album, ATLiens, released in 1996, is widely regarded as a masterpiece of southern hip hop. - - - -ATLiens showcases Outkast's lyrical skills, musical diversity, and creative vision. The album features a blend of live instruments and samples, creating a futuristic and funky atmosphere. The album also explores themes of spirituality, alienation, identity, and social commentary. Some of the standout tracks include "Two Dope Boyz (In a Cadillac)", "ATLiens", "Elevators (Me & You)", "Jazzy Belle", and "13th Floor/Growing Old". - - - -The album received critical acclaim and commercial success, selling over two million copies in the US and earning a platinum certification. It also reached number two on the Billboard 200 chart and number one on the Top R&B/Hip-Hop Albums chart. ATLiens is considered one of the best hip hop albums of all time by many critics and fans. It has influenced many artists across genres and generations, such as Kendrick Lamar, Drake, J. Cole, Childish Gambino, and Frank Ocean. - - - -If you want to listen to Outkast's ATLiens full album zip, you can download it for free from the Internet Archive[^1^] or stream it on YouTube[^2^] or SoundCloud[^3^]. You can also buy it from online stores or streaming platforms. You won't regret it! 
- - - -One of the most remarkable aspects of ATLiens is how Outkast manage to balance their experimental impulses with their commercial appeal. The album spawned three hit singles: "Elevators (Me & You)", which reached number 12 on the Billboard Hot 100 chart; "ATLiens", which peaked at number 35; and "Jazzy Belle", which climbed to number 52. All three songs showcase Outkast's distinctive style of storytelling, wordplay, and humor, as well as their ability to craft catchy hooks and memorable melodies. - - - -Another notable feature of ATLiens is how Outkast collaborate with other artists from their native Atlanta. The album features guest appearances by Goodie Mob members Cee-Lo, Big Gipp, Khujo, and T-Mo, as well as Cool Breeze, Witchdoctor, and Joi. These artists represent the Dungeon Family, a collective of musicians and producers who share a similar vision of southern hip hop. The Dungeon Family also includes Organized Noize, who produced most of ATLiens along with Outkast themselves. The album's production is rich and varied, incorporating elements of funk, soul, rock, jazz, and electronic music. - - - -ATLiens is not only a landmark album for Outkast and southern hip hop, but also for hip hop as a whole. It demonstrates that hip hop can be creative, innovative, and diverse without losing its essence or its audience. It also proves that hip hop can transcend regional boundaries and appeal to listeners from different backgrounds and cultures. ATLiens is a testament to Outkast's artistic vision and musical talent, and a classic album that deserves to be heard by everyone. - - dfd1c89656 - - - - - diff --git a/spaces/rorallitri/biomedical-language-models/logs/Ben 10 Alien Force - Vilgax Attacks full movie in italian 720p download link Fast and easy.md b/spaces/rorallitri/biomedical-language-models/logs/Ben 10 Alien Force - Vilgax Attacks full movie in italian 720p download link Fast and easy.md deleted file mode 100644 index fc89c90e2e516cdf692b55c3f8aef3c4ca6311a4..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Ben 10 Alien Force - Vilgax Attacks full movie in italian 720p download link Fast and easy.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Ben 10: Alien Force - Vilgax Attacks full movie in italian 720p download


    Download File 🔗 https://tinurll.com/2uzowt



    -
    -
    -
    -

    diff --git a/spaces/rorallitri/biomedical-language-models/logs/Cocoa Tea Discography Torrent Mega Download All His Reggae Hits.md b/spaces/rorallitri/biomedical-language-models/logs/Cocoa Tea Discography Torrent Mega Download All His Reggae Hits.md deleted file mode 100644 index a546861f747a51e9c3ab5cd87a02b1b2a494fe7e..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Cocoa Tea Discography Torrent Mega Download All His Reggae Hits.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Cocoa Tea Discography Torrent Mega


    DOWNLOAD ✏ ✏ ✏ https://tinurll.com/2uzm6K



    -
    -
    -
    -

    diff --git a/spaces/rorallitri/biomedical-language-models/logs/Flash Decompiler Trillix 3.0 Keygen ((EXCLUSIVE)).md b/spaces/rorallitri/biomedical-language-models/logs/Flash Decompiler Trillix 3.0 Keygen ((EXCLUSIVE)).md deleted file mode 100644 index 60377b7aa67d48aff428c183bca461433bf17362..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Flash Decompiler Trillix 3.0 Keygen ((EXCLUSIVE)).md +++ /dev/null @@ -1,6 +0,0 @@ -

    flash decompiler trillix 3.0 keygen


    Download Filehttps://tinurll.com/2uzouo



    - -Regged-CzW keygen 5004 Flash Decompiler Trillix 3.0.3.470 patch 9266 Flash ... FlashFXP 3.8 (3.7.8 build 1336) Beta crack 5966 FlashFXP ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/rorallitri/biomedical-language-models/logs/Fumetti Il Comandante Mark Pdf.md b/spaces/rorallitri/biomedical-language-models/logs/Fumetti Il Comandante Mark Pdf.md deleted file mode 100644 index 025befbcd067e05a43f03127386737c036d36970..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Fumetti Il Comandante Mark Pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

    fumetti il comandante mark pdf


    Download Filehttps://tinurll.com/2uzo1f



    - -Fumetto "il comandante Mark"-nuova collana Araldo,n.31 seconda serie del marzo ... Collezione completa COMANDANTE MARK pdf 1/281 su 6 DVD (euro 45 ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/rorallitri/biomedical-language-models/logs/GWizard CNC Calculatortorrent.md b/spaces/rorallitri/biomedical-language-models/logs/GWizard CNC Calculatortorrent.md deleted file mode 100644 index 2ea1850f3ffd33fa490ff97999c0acfe9572d8a8..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/GWizard CNC Calculatortorrent.md +++ /dev/null @@ -1,10 +0,0 @@ -

    GWizard CNC Calculatortorrent


    DOWNLOAD 🌟 https://tinurll.com/2uzmJO



    -
-G-Wizard Calculator is designed to save you time by providing you with a variety of reference materials that every machinist and engineer should have on the job. -It has a "pay by account" feature and you can set your own rates. -If you set your own fare, it will charge for each ride as long as that fare does not exceed the maximum limit you set. -Pay by invoice: you can set your own fares and rates. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/rorallitri/biomedical-language-models/logs/Gta 3 Skins Pack VERIFIED.md b/spaces/rorallitri/biomedical-language-models/logs/Gta 3 Skins Pack VERIFIED.md deleted file mode 100644 index 10e7b004ab1a5ffe70e6e8fc827c856910c84d4c..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Gta 3 Skins Pack VERIFIED.md +++ /dev/null @@ -1,17 +0,0 @@ -

    Gta 3 Skins Pack


    Download Ziphttps://tinurll.com/2uzlEV



- -Two Alternate Joker Skins From Batman, 1, 3, 3409, Creasy_Bear, skin pack, 1, 0, 1670, greengc035, August 07, 2007 Set Skins Niko Bellic for GTA 3. ... Download skin pack for GTA San Andreas. -Cleo for gta 3 download. -Download mod for GTA San Andreas for cars. -How to download gta san andreas -Downloaded and installed and it is in English -Download skins for gta san andreas. -Download skin pack for free. -Download skin pack for Hollywood. -Download skins for gta sa. -Download skin pack cars for gta san andreas. -I had a skin pack for GTA San Andreas, but I deleted it, maybe you can somehow re-download it and I don't know how to... -What's the pack? 8a78ff9644
    -
    -
    -

    diff --git a/spaces/rushic24/Priyanka-Chopra-TTS/synthesis/vocoders/vocoder.py b/spaces/rushic24/Priyanka-Chopra-TTS/synthesis/vocoders/vocoder.py deleted file mode 100644 index 772d45e52d05c6321b6b6b76b74cfcdc523d08b0..0000000000000000000000000000000000000000 --- a/spaces/rushic24/Priyanka-Chopra-TTS/synthesis/vocoders/vocoder.py +++ /dev/null @@ -1,27 +0,0 @@ -from abc import ABC, abstractmethod - - -MAX_WAV_VALUE = 32768.0 - - -class Vocoder(ABC): - """ - Produces audio data for tacotron2 mel spectrogram output - """ - - @abstractmethod - def generate_audio(self, mel_output): - """ - Produces wav audio data for a given mel output. - - Parameters - ---------- - mel_output : Tensor - Mel spectrogram output - - Returns - ------- - np.array - Generated audio data - """ - pass diff --git a/spaces/ruslanmv/Text2Lip/utils/__init__.py b/spaces/ruslanmv/Text2Lip/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/inception.py b/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/inception.py deleted file mode 100644 index f3afed8123e595f65c1333dea7151e653a836e2b..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/inception.py +++ /dev/null @@ -1,310 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision import models - -try: - from torchvision.models.utils import load_state_dict_from_url -except ImportError: - from torch.utils.model_zoo import load_url as load_state_dict_from_url - -# Inception weights ported to Pytorch from -# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz -FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' - - -class InceptionV3(nn.Module): - """Pretrained InceptionV3 network returning feature maps""" - - # Index of default block of inception to return, - # corresponds to output of final average pooling - DEFAULT_BLOCK_INDEX = 3 - - # Maps feature dimensionality to their output blocks indices - BLOCK_INDEX_BY_DIM = { - 64: 0, # First max pooling features - 192: 1, # Second max pooling featurs - 768: 2, # Pre-aux classifier features - 2048: 3 # Final average pooling features - } - - def __init__(self, - output_blocks=[DEFAULT_BLOCK_INDEX], - resize_input=True, - normalize_input=True, - requires_grad=False, - use_fid_inception=True): - """Build pretrained InceptionV3 - - Parameters - ---------- - output_blocks : list of int - Indices of blocks to return features of. Possible values are: - - 0: corresponds to output of first max pooling - - 1: corresponds to output of second max pooling - - 2: corresponds to output which is fed to aux classifier - - 3: corresponds to output of final average pooling - resize_input : bool - If true, bilinearly resizes input to width and height 299 before - feeding input to model. As the network without fully connected - layers is fully convolutional, it should be able to handle inputs - of arbitrary size, so resizing might not be strictly needed - normalize_input : bool - If true, scales the input from range (0, 1) to the range the - pretrained Inception network expects, namely (-1, 1) - requires_grad : bool - If true, parameters of the model require gradients. 
Possibly useful - for finetuning the network - use_fid_inception : bool - If true, uses the pretrained Inception model used in Tensorflow's - FID implementation. If false, uses the pretrained Inception model - available in torchvision. The FID Inception model has different - weights and a slightly different structure from torchvision's - Inception model. If you want to compute FID scores, you are - strongly advised to set this parameter to true to get comparable - results. - """ - super(InceptionV3, self).__init__() - - self.resize_input = resize_input - self.normalize_input = normalize_input - self.output_blocks = sorted(output_blocks) - self.last_needed_block = max(output_blocks) - - assert self.last_needed_block <= 3, \ - 'Last possible output block index is 3' - - self.blocks = nn.ModuleList() - - if use_fid_inception: - inception = fid_inception_v3() - else: - inception = models.inception_v3(pretrained=True) - - # Block 0: input to maxpool1 - block0 = [ - inception.Conv2d_1a_3x3, - inception.Conv2d_2a_3x3, - inception.Conv2d_2b_3x3, - nn.MaxPool2d(kernel_size=3, stride=2) - ] - self.blocks.append(nn.Sequential(*block0)) - - # Block 1: maxpool1 to maxpool2 - if self.last_needed_block >= 1: - block1 = [ - inception.Conv2d_3b_1x1, - inception.Conv2d_4a_3x3, - nn.MaxPool2d(kernel_size=3, stride=2) - ] - self.blocks.append(nn.Sequential(*block1)) - - # Block 2: maxpool2 to aux classifier - if self.last_needed_block >= 2: - block2 = [ - inception.Mixed_5b, - inception.Mixed_5c, - inception.Mixed_5d, - inception.Mixed_6a, - inception.Mixed_6b, - inception.Mixed_6c, - inception.Mixed_6d, - inception.Mixed_6e, - ] - self.blocks.append(nn.Sequential(*block2)) - - # Block 3: aux classifier to final avgpool - if self.last_needed_block >= 3: - block3 = [ - inception.Mixed_7a, - inception.Mixed_7b, - inception.Mixed_7c, - nn.AdaptiveAvgPool2d(output_size=(1, 1)) - ] - self.blocks.append(nn.Sequential(*block3)) - - for param in self.parameters(): - param.requires_grad = requires_grad - - def forward(self, inp): - """Get Inception feature maps - - Parameters - ---------- - inp : torch.autograd.Variable - Input tensor of shape Bx3xHxW. Values are expected to be in - range (0, 1) - - Returns - ------- - List of torch.autograd.Variable, corresponding to the selected output - block, sorted ascending by index - """ - outp = [] - x = inp - - if self.resize_input: - x = F.interpolate(x, - size=(299, 299), - mode='bilinear', - align_corners=False) - - if self.normalize_input: - x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1) - - for idx, block in enumerate(self.blocks): - x = block(x) - if idx in self.output_blocks: - outp.append(x) - - if idx == self.last_needed_block: - break - - return outp - - -def fid_inception_v3(): - """Build pretrained Inception model for FID computation - - The Inception model for FID computation uses a different set of weights - and has a slightly different structure than torchvision's Inception. - - This method first constructs torchvision's Inception and then patches the - necessary parts that are different in the FID Inception model. 
- """ - inception = models.inception_v3(num_classes=1008, - aux_logits=False, - pretrained=False) - inception.Mixed_5b = FIDInceptionA(192, pool_features=32) - inception.Mixed_5c = FIDInceptionA(256, pool_features=64) - inception.Mixed_5d = FIDInceptionA(288, pool_features=64) - inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128) - inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160) - inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160) - inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192) - inception.Mixed_7b = FIDInceptionE_1(1280) - inception.Mixed_7c = FIDInceptionE_2(2048) - - state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True) - inception.load_state_dict(state_dict) - return inception - - -class FIDInceptionA(models.inception.InceptionA): - """InceptionA block patched for FID computation""" - def __init__(self, in_channels, pool_features): - super(FIDInceptionA, self).__init__(in_channels, pool_features) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch5x5 = self.branch5x5_1(x) - branch5x5 = self.branch5x5_2(branch5x5) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionC(models.inception.InceptionC): - """InceptionC block patched for FID computation""" - def __init__(self, in_channels, channels_7x7): - super(FIDInceptionC, self).__init__(in_channels, channels_7x7) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch7x7 = self.branch7x7_1(x) - branch7x7 = self.branch7x7_2(branch7x7) - branch7x7 = self.branch7x7_3(branch7x7) - - branch7x7dbl = self.branch7x7dbl_1(x) - branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) - branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] - return torch.cat(outputs, 1) - - -class FIDInceptionE_1(models.inception.InceptionE): - """First InceptionE block patched for FID computation""" - def __init__(self, in_channels): - super(FIDInceptionE_1, self).__init__(in_channels) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch3x3 = self.branch3x3_1(x) - branch3x3 = [ - self.branch3x3_2a(branch3x3), - self.branch3x3_2b(branch3x3), - ] - branch3x3 = torch.cat(branch3x3, 1) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = [ - self.branch3x3dbl_3a(branch3x3dbl), - self.branch3x3dbl_3b(branch3x3dbl), - ] - branch3x3dbl = torch.cat(branch3x3dbl, 1) - - # Patch: Tensorflow's average pool does not use the padded zero's in - # its average calculation - branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, - count_include_pad=False) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] - return torch.cat(outputs, 
1) - - -class FIDInceptionE_2(models.inception.InceptionE): - """Second InceptionE block patched for FID computation""" - def __init__(self, in_channels): - super(FIDInceptionE_2, self).__init__(in_channels) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch3x3 = self.branch3x3_1(x) - branch3x3 = [ - self.branch3x3_2a(branch3x3), - self.branch3x3_2b(branch3x3), - ] - branch3x3 = torch.cat(branch3x3, 1) - - branch3x3dbl = self.branch3x3dbl_1(x) - branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) - branch3x3dbl = [ - self.branch3x3dbl_3a(branch3x3dbl), - self.branch3x3dbl_3b(branch3x3dbl), - ] - branch3x3dbl = torch.cat(branch3x3dbl, 1) - - # Patch: The FID Inception model uses max pooling instead of average - # pooling. This is likely an error in this specific Inception - # implementation, as other Inception models use average pooling here - # (which matches the description in the paper). - branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1) - branch_pool = self.branch_pool(branch_pool) - - outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] - return torch.cat(outputs, 1) diff --git a/spaces/sam-hq-team/sam-hq/README.md b/spaces/sam-hq-team/sam-hq/README.md deleted file mode 100644 index 0711456eca9796e64f39a600c8698291a4ba27b7..0000000000000000000000000000000000000000 --- a/spaces/sam-hq-team/sam-hq/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sam Hq -emoji: 🏆 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sayakpaul/sidd-denoising-maxim/maxim/blocks/grid_gating.py b/spaces/sayakpaul/sidd-denoising-maxim/maxim/blocks/grid_gating.py deleted file mode 100644 index 91980c874bd1175f1eb0be554f7be99b60cf86bd..0000000000000000000000000000000000000000 --- a/spaces/sayakpaul/sidd-denoising-maxim/maxim/blocks/grid_gating.py +++ /dev/null @@ -1,68 +0,0 @@ -import tensorflow as tf -from tensorflow.keras import backend as K -from tensorflow.keras import layers - -from ..layers import BlockImages, SwapAxes, UnblockImages - - -def GridGatingUnit(use_bias: bool = True, name: str = "grid_gating_unit"): - """A SpatialGatingUnit as defined in the gMLP paper. - - The 'spatial' dim is defined as the second last. - If applied on other dims, you should swapaxes first. - """ - - def apply(x): - u, v = tf.split(x, 2, axis=-1) - v = layers.LayerNormalization( - epsilon=1e-06, name=f"{name}_intermediate_layernorm" - )(v) - n = K.int_shape(x)[-3] # get spatial dim - v = SwapAxes()(v, -1, -3) - v = layers.Dense(n, use_bias=use_bias, name=f"{name}_Dense_0")(v) - v = SwapAxes()(v, -1, -3) - return u * (v + 1.0) - - return apply - - -def GridGmlpLayer( - grid_size, - use_bias: bool = True, - factor: int = 2, - dropout_rate: float = 0.0, - name: str = "grid_gmlp", -): - """Grid gMLP layer that performs global mixing of tokens.""" - - def apply(x): - n, h, w, num_channels = ( - K.int_shape(x)[0], - K.int_shape(x)[1], - K.int_shape(x)[2], - K.int_shape(x)[3], - ) - gh, gw = grid_size - fh, fw = h // gh, w // gw - - x = BlockImages()(x, patch_size=(fh, fw)) - # gMLP1: Global (grid) mixing part, provides global grid communication. 
- y = layers.LayerNormalization(epsilon=1e-06, name=f"{name}_LayerNorm")(x) - y = layers.Dense( - num_channels * factor, - use_bias=use_bias, - name=f"{name}_in_project", - )(y) - y = tf.nn.gelu(y, approximate=True) - y = GridGatingUnit(use_bias=use_bias, name=f"{name}_GridGatingUnit")(y) - y = layers.Dense( - num_channels, - use_bias=use_bias, - name=f"{name}_out_project", - )(y) - y = layers.Dropout(dropout_rate)(y) - x = x + y - x = UnblockImages()(x, grid_size=(gh, gw), patch_size=(fh, fw)) - return x - - return apply diff --git a/spaces/scedlatioru/img-to-music/example/Battle Slave Fantasia Special Edition.md b/spaces/scedlatioru/img-to-music/example/Battle Slave Fantasia Special Edition.md deleted file mode 100644 index de9090ac918c776264ecca2f92e0916f281cc69a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Battle Slave Fantasia Special Edition.md +++ /dev/null @@ -1,12 +0,0 @@ -

    Battle Slave Fantasia Special Edition


    Download Zip ··· https://gohhs.com/2uEz4t



    - -But apart from the music, there’s more to The Last Jedi than just one old man telling Rey about his past. Sure, he reveals the truth about his parents, but more importantly, he shares his philosophy and helps her comprehend her place in the galaxy. It’s a powerful, heart-wrenching scene, and the only one to go down well with fans and critics alike. So if the movie’s main message is about living a life of selflessness, why has Kylo Ren gone rogue? Let’s unpack what it means to be the ‘true believer’ in Star Wars. - -In The Last Jedi, fans finally see a return of Kylo Ren. There’s no way of not feeling that this is the guy who was barely in The Force Awakens, and therefore a little flat, compared to Rey and Finn. And yet, it’s the scene in The Last Jedi where Ren is at his most fanatical, calling himself a ‘true believer’, that struck a chord with some of us. Star Wars has always told the story of what happens to people who believe too much in something, but The Last Jedi takes that ideology to another level. As such, we’re able to see how someone like Ren can follow his own path, and not care about the rules and laws of the established order. - -We’ve long been asked, why would Ren follow the light side path, when he’s obviously a villainous villain? Well, it’s twofold. Firstly, he believes that there are no good or evil sides, only black and white. When he’s presented with a choice to turn to the light side, he naturally follows because there’s no reward. Also, when he says he’s the ‘real’ hero, he’s talking from the perspective of a villain. Kylo Ren wants to be seen as the ‘chosen one’, and that means he needs to become evil in order to make that idea true. It’s all an act of manipulation. - -Rey and Finn, on the other hand, have more of a middle-of-the-road approach to this situation. When Finn and Ren fight on the desert planet of Crait, Finn tries to explain to Ren that there are no good or bad sides – only the light and the dark. Ren rejects the idea, claiming 4fefd39f24
    -
    -
    -

    diff --git a/spaces/scedlatioru/img-to-music/example/Digital Workshop Opus Pro 8 Crack [Extra Quality].md b/spaces/scedlatioru/img-to-music/example/Digital Workshop Opus Pro 8 Crack [Extra Quality].md deleted file mode 100644 index d01801e5f6336ade74ecad3000b94fd31f6c693f..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Digital Workshop Opus Pro 8 Crack [Extra Quality].md +++ /dev/null @@ -1,6 +0,0 @@ -
    -

Most of these lesions are treated by endovascular procedures and/or surgical revascularisation. In some cases, the extension of the arterial disease requires surgical bypass. These interventions are associated with a high risk of ischemia in distal parts of the hand. Patients with multiple lesions may benefit from simultaneous revascularisation of digital arteries of both hands. The treatment of digital ischemia may be performed by a single- or dual-stage procedure, depending on the extent of the disease [20, 47]. In the case of a single stage, the spa is usually the first affected artery and often it is the only one that remains patent. The concomitant lesions are then treated. The residual artery is then ligated and the distal portion of the spa is revascularised. This may be performed in a single stage or in two stages. In the first stage, the distal portion of the spa is revascularised by endovascular techniques and the residual artery in the proximal portion is ligated. In the second stage, the endovascular techniques are used to revascularise the spa and digital arteries of the second and third spaces. In the case of a two-stage procedure, an additional graft is inserted before the second stage [46].

    -

The endovascular treatment of the digital arteries requires a multidisciplinary team. It is important to have a vascular surgeon involved because the case is usually complicated by the presence of arterial occlusions. The interventional radiologist should be familiar with the endovascular techniques used in the treatment of peripheral arterial occlusive disease. The contribution of a neurosurgeon is important because the neurovascular structures may be involved by arterial lesions and the ischemic lesions of the digital arteries may be associated with distal ulcers. A dermatologist can also be helpful because the ischemic lesions may be located in the nail folds. The use of a psychologist is essential to ensure patients' compliance with treatment.

    -

    Digital Workshop Opus Pro 8 Crack


    Download File ✏ ✏ ✏ https://gohhs.com/2uEAyJ



    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/HD Online Player (Video Seks Anak Dengan Ibu Kandung).md b/spaces/scedlatioru/img-to-music/example/HD Online Player (Video Seks Anak Dengan Ibu Kandung).md deleted file mode 100644 index 70cf60c6a9aebe28842e29f9eaef1edd53078ddc..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/HD Online Player (Video Seks Anak Dengan Ibu Kandung).md +++ /dev/null @@ -1,6 +0,0 @@ -

    HD Online Player (Video Seks Anak Dengan Ibu Kandung)


    Download ❤❤❤ https://gohhs.com/2uEzIs



    -
    -Los mejores vídeos porno gay xxx flem porno anak dan ibu kandung están aquí en YouPorn.com. Haz clic y encuentra todas las películas porno xxx flem porno ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/sczhou/ProPainter/RAFT/utils/flow_viz.py b/spaces/sczhou/ProPainter/RAFT/utils/flow_viz.py deleted file mode 100644 index dcee65e89b91b07ee0496aeb4c7e7436abf99641..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/RAFT/utils/flow_viz.py +++ /dev/null @@ -1,132 +0,0 @@ -# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization - - -# MIT License -# -# Copyright (c) 2018 Tom Runia -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to conditions. -# -# Author: Tom Runia -# Date Created: 2018-08-03 - -import numpy as np - -def make_colorwheel(): - """ - Generates a color wheel for optical flow visualization as presented in: - Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) - URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf - - Code follows the original C++ source code of Daniel Scharstein. - Code follows the the Matlab source code of Deqing Sun. - - Returns: - np.ndarray: Color wheel - """ - - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - colorwheel = np.zeros((ncols, 3)) - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY) - col = col+RY - # YG - colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG) - colorwheel[col:col+YG, 1] = 255 - col = col+YG - # GC - colorwheel[col:col+GC, 1] = 255 - colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC) - col = col+GC - # CB - colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) - colorwheel[col:col+CB, 2] = 255 - col = col+CB - # BM - colorwheel[col:col+BM, 2] = 255 - colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM) - col = col+BM - # MR - colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) - colorwheel[col:col+MR, 0] = 255 - return colorwheel - - -def flow_uv_to_colors(u, v, convert_to_bgr=False): - """ - Applies the flow color wheel to (possibly clipped) flow components u and v. - - According to the C++ source code of Daniel Scharstein - According to the Matlab source code of Deqing Sun - - Args: - u (np.ndarray): Input horizontal flow of shape [H,W] - v (np.ndarray): Input vertical flow of shape [H,W] - convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. 
- - Returns: - np.ndarray: Flow visualization image of shape [H,W,3] - """ - flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) - colorwheel = make_colorwheel() # shape [55x3] - ncols = colorwheel.shape[0] - rad = np.sqrt(np.square(u) + np.square(v)) - a = np.arctan2(-v, -u)/np.pi - fk = (a+1) / 2*(ncols-1) - k0 = np.floor(fk).astype(np.int32) - k1 = k0 + 1 - k1[k1 == ncols] = 0 - f = fk - k0 - for i in range(colorwheel.shape[1]): - tmp = colorwheel[:,i] - col0 = tmp[k0] / 255.0 - col1 = tmp[k1] / 255.0 - col = (1-f)*col0 + f*col1 - idx = (rad <= 1) - col[idx] = 1 - rad[idx] * (1-col[idx]) - col[~idx] = col[~idx] * 0.75 # out of range - # Note the 2-i => BGR instead of RGB - ch_idx = 2-i if convert_to_bgr else i - flow_image[:,:,ch_idx] = np.floor(255 * col) - return flow_image - - -def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False): - """ - Expects a two dimensional flow image of shape. - - Args: - flow_uv (np.ndarray): Flow UV image of shape [H,W,2] - clip_flow (float, optional): Clip maximum of flow values. Defaults to None. - convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. - - Returns: - np.ndarray: Flow visualization image of shape [H,W,3] - """ - assert flow_uv.ndim == 3, 'input flow must have three dimensions' - assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' - if clip_flow is not None: - flow_uv = np.clip(flow_uv, 0, clip_flow) - u = flow_uv[:,:,0] - v = flow_uv[:,:,1] - rad = np.sqrt(np.square(u) + np.square(v)) - rad_max = np.max(rad) - epsilon = 1e-5 - u = u / (rad_max + epsilon) - v = v / (rad_max + epsilon) - return flow_uv_to_colors(u, v, convert_to_bgr) \ No newline at end of file diff --git a/spaces/sdhsdhk/bingo111/src/components/chat-message.tsx b/spaces/sdhsdhk/bingo111/src/components/chat-message.tsx deleted file mode 100644 index bf272d8d7005cfd06c53bd213e09ea217e803549..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingo111/src/components/chat-message.tsx +++ /dev/null @@ -1,93 +0,0 @@ -import remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' -import supersub from 'remark-supersub' -import remarkBreaks from 'remark-breaks' -import { cn } from '@/lib/utils' -import { CodeBlock } from '@/components/ui/codeblock' -import { MemoizedReactMarkdown } from '@/components/markdown' -import { LearnMore } from './learn-more' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { useEffect } from 'react' -import { TurnCounter } from './turn-counter' - -export interface ChatMessageProps { - message: ChatMessageModel -} - -export function ChatMessage({ message, ...props }: ChatMessageProps) { - useEffect(() => { - if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) { - window.scrollBy(0, 200) - } - }, [message.text]) - - return message.text ? ( -
    -
    - {obj.alt} - } - } catch (e) { - } - return {obj.alt} - }, - p({ children }) { - return

    {children}

    - }, - code({ node, inline, className, children, ...props }) { - if (children.length) { - if (children[0] == '▍') { - return ( - - ) - } - - children[0] = (children[0] as string).replace('`▍`', '▍') - } - - const match = /language-(\w+)/.exec(className || '') - - if (inline) { - return ( - - {children} - - ) - } - - return ( - - ) - } - }} - > - {message.text} -
    -
    -
    - {message.author === 'bot' && } - {message.author === 'bot' && } -
    -
    - ) : null -} diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/envfilescheck.bat b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/envfilescheck.bat deleted file mode 100644 index 645ae0f4d20449a2b7394939cf9e32865eaf7fb1..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/envfilescheck.bat +++ /dev/null @@ -1,230 +0,0 @@ -@echo off && chcp 65001 - -echo working dir is %cd% -echo downloading requirement aria2 check. -echo= -dir /a:d/b | findstr "aria2" > flag.txt -findstr "aria2" flag.txt >nul -if %errorlevel% ==0 ( - echo aria2 checked. - echo= -) else ( - echo failed. please downloading aria2 from webpage! - echo unzip it and put in this directory! - timeout /T 5 - start https://github.com/aria2/aria2/releases/tag/release-1.36.0 - echo= - goto end -) - -echo envfiles checking start. -echo= - -for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch -:endSch - -set d32=f0D32k.pth -set d40=f0D40k.pth -set d48=f0D48k.pth -set g32=f0G32k.pth -set g40=f0G40k.pth -set g48=f0G48k.pth - -set dld32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -set dld40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -set dld48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -set dlg32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -set dlg40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -set dlg48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth - -set hp2=HP2-人声vocals+非人声instrumentals.pth -set hp5=HP5-主旋律人声vocals+其他instrumentals.pth - -set dlhp2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -set dlhp5=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth - -set hb=hubert_base.pt - -set dlhb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt - -echo dir check start. -echo= - -if exist "%~dp0pretrained" ( - echo dir .\pretrained checked. - ) else ( - echo failed. generating dir .\pretrained. - mkdir pretrained - ) -if exist "%~dp0uvr5_weights" ( - echo dir .\uvr5_weights checked. - ) else ( - echo failed. generating dir .\uvr5_weights. - mkdir uvr5_weights - ) - -echo= -echo dir check finished. - -echo= -echo required files check start. - -echo checking D32k.pth -if exist "%~dp0pretrained\D32k.pth" ( - echo D32k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d %~dp0pretrained -o D32k.pth - if exist "%~dp0pretrained\D32k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D40k.pth -if exist "%~dp0pretrained\D40k.pth" ( - echo D40k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d %~dp0pretrained -o D40k.pth - if exist "%~dp0pretrained\D40k.pth" (echo download successful.) else (echo please try again! 
- echo=) - ) - echo checking D48k.pth -if exist "%~dp0pretrained\D48k.pth" ( - echo D48k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d %~dp0pretrained -o D48k.pth - if exist "%~dp0pretrained\D48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) - echo checking G32k.pth -if exist "%~dp0pretrained\G32k.pth" ( - echo G32k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d %~dp0pretrained -o G32k.pth - if exist "%~dp0pretrained\G32k.pth" (echo download successful.) else (echo please try again! - echo=) - ) - echo checking G40k.pth -if exist "%~dp0pretrained\G40k.pth" ( - echo G40k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d %~dp0pretrained -o G40k.pth - if exist "%~dp0pretrained\G40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) - echo checking G48k.pth -if exist "%~dp0pretrained\G48k.pth" ( - echo G48k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d %~dp0pretrained -o G48k.pth - if exist "%~dp0pretrained\G48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %d32% -if exist "%~dp0pretrained\%d32%" ( - echo %d32% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld32% -d %~dp0pretrained -o %d32% - if exist "%~dp0pretrained\%d32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d40% -if exist "%~dp0pretrained\%d40%" ( - echo %d40% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40% -d %~dp0pretrained -o %d40% - if exist "%~dp0pretrained\%d40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d48% -if exist "%~dp0pretrained\%d48%" ( - echo %d48% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld48% -d %~dp0pretrained -o %d48% - if exist "%~dp0pretrained\%d48%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g32% -if exist "%~dp0pretrained\%g32%" ( - echo %g32% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg32% -d %~dp0pretrained -o %g32% - if exist "%~dp0pretrained\%g32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g40% -if exist "%~dp0pretrained\%g40%" ( - echo %g40% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. 
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40% -d %~dp0pretrained -o %g40% - if exist "%~dp0pretrained\%g40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g48% -if exist "%~dp0pretrained\%g48%" ( - echo %g48% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg48% -d %~dp0\pretrained -o %g48% - if exist "%~dp0pretrained\%g48%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %hp2% -if exist "%~dp0uvr5_weights\%hp2%" ( - echo %hp2% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp2% -d %~dp0\uvr5_weights -o %hp2% - if exist "%~dp0uvr5_weights\%hp2%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %hp5% -if exist "%~dp0uvr5_weights\%hp5%" ( - echo %hp5% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp5% -d %~dp0\uvr5_weights -o %HP5% - if exist "%~dp0uvr5_weights\%hp5%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %hb% -if exist "%~dp0%hb%" ( - echo %hb% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhb% -d %~dp0 -o %hb% - if exist "%~dp0%hb%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo required files check finished. -echo envfiles check complete. -pause -:end -del flag.txt \ No newline at end of file diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/detection/matlab_cp2tform.py b/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/detection/matlab_cp2tform.py deleted file mode 100644 index b2a8b54a91709c71437e15c68d3be9a9b0a20a34..0000000000000000000000000000000000000000 --- a/spaces/shiwan10000/CodeFormer/CodeFormer/facelib/detection/matlab_cp2tform.py +++ /dev/null @@ -1,317 +0,0 @@ -import numpy as np -from numpy.linalg import inv, lstsq -from numpy.linalg import matrix_rank as rank -from numpy.linalg import norm - - -class MatlabCp2tormException(Exception): - - def __str__(self): - return 'In File {}:{}'.format(__file__, super.__str__(self)) - - -def tformfwd(trans, uv): - """ - Function: - ---------- - apply affine transform 'trans' to uv - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix - @uv: Kx2 np.array - each row is a pair of coordinates (x, y) - - Returns: - ---------- - @xy: Kx2 np.array - each row is a pair of transformed coordinates (x, y) - """ - uv = np.hstack((uv, np.ones((uv.shape[0], 1)))) - xy = np.dot(uv, trans) - xy = xy[:, 0:-1] - return xy - - -def tforminv(trans, uv): - """ - Function: - ---------- - apply the inverse of affine transform 'trans' to uv - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix - @uv: Kx2 np.array - each row is a pair of coordinates (x, y) - - Returns: - ---------- - @xy: Kx2 np.array - each row is a pair of inverse-transformed coordinates (x, y) - """ - Tinv = inv(trans) - xy = tformfwd(Tinv, uv) - return xy - - -def findNonreflectiveSimilarity(uv, xy, options=None): - options = {'K': 2} - - K = options['K'] - M = xy.shape[0] - x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - y = 
xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - - tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1)))) - tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1)))) - X = np.vstack((tmp1, tmp2)) - - u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector - v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector - U = np.vstack((u, v)) - - # We know that X * r = U - if rank(X) >= 2 * K: - r, _, _, _ = lstsq(X, U, rcond=-1) - r = np.squeeze(r) - else: - raise Exception('cp2tform:twoUniquePointsReq') - sc = r[0] - ss = r[1] - tx = r[2] - ty = r[3] - - Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]]) - T = inv(Tinv) - T[:, 2] = np.array([0, 0, 1]) - - return T, Tinv - - -def findSimilarity(uv, xy, options=None): - options = {'K': 2} - - # uv = np.array(uv) - # xy = np.array(xy) - - # Solve for trans1 - trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options) - - # Solve for trans2 - - # manually reflect the xy data across the Y-axis - xyR = xy - xyR[:, 0] = -1 * xyR[:, 0] - - trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options) - - # manually reflect the tform to undo the reflection done on xyR - TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) - - trans2 = np.dot(trans2r, TreflectY) - - # Figure out if trans1 or trans2 is better - xy1 = tformfwd(trans1, uv) - norm1 = norm(xy1 - xy) - - xy2 = tformfwd(trans2, uv) - norm2 = norm(xy2 - xy) - - if norm1 <= norm2: - return trans1, trans1_inv - else: - trans2_inv = inv(trans2) - return trans2, trans2_inv - - -def get_similarity_transform(src_pts, dst_pts, reflective=True): - """ - Function: - ---------- - Find Similarity Transform Matrix 'trans': - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y, 1] = [u, v, 1] * trans - - Parameters: - ---------- - @src_pts: Kx2 np.array - source points, each row is a pair of coordinates (x, y) - @dst_pts: Kx2 np.array - destination points, each row is a pair of transformed - coordinates (x, y) - @reflective: True or False - if True: - use reflective similarity transform - else: - use non-reflective similarity transform - - Returns: - ---------- - @trans: 3x3 np.array - transform matrix from uv to xy - trans_inv: 3x3 np.array - inverse of trans, transform matrix from xy to uv - """ - - if reflective: - trans, trans_inv = findSimilarity(src_pts, dst_pts) - else: - trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts) - - return trans, trans_inv - - -def cvt_tform_mat_for_cv2(trans): - """ - Function: - ---------- - Convert Transform Matrix 'trans' into 'cv2_trans' which could be - directly used by cv2.warpAffine(): - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y].T = cv_trans * [u, v, 1].T - - Parameters: - ---------- - @trans: 3x3 np.array - transform matrix from uv to xy - - Returns: - ---------- - @cv2_trans: 2x3 np.array - transform matrix from src_pts to dst_pts, could be directly used - for cv2.warpAffine() - """ - cv2_trans = trans[:, 0:2].T - - return cv2_trans - - -def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True): - """ - Function: - ---------- - Find Similarity Transform Matrix 'cv2_trans' which could be - directly used by cv2.warpAffine(): - u = src_pts[:, 0] - v = src_pts[:, 1] - x = dst_pts[:, 0] - y = dst_pts[:, 1] - [x, y].T = cv_trans * [u, v, 1].T - - Parameters: - ---------- - @src_pts: Kx2 np.array - source points, each row is a pair of coordinates (x, y) - @dst_pts: Kx2 np.array - destination 
points, each row is a pair of transformed - coordinates (x, y) - reflective: True or False - if True: - use reflective similarity transform - else: - use non-reflective similarity transform - - Returns: - ---------- - @cv2_trans: 2x3 np.array - transform matrix from src_pts to dst_pts, could be directly used - for cv2.warpAffine() - """ - trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective) - cv2_trans = cvt_tform_mat_for_cv2(trans) - - return cv2_trans - - -if __name__ == '__main__': - """ - u = [0, 6, -2] - v = [0, 3, 5] - x = [-1, 0, 4] - y = [-1, -10, 4] - - # In Matlab, run: - # - # uv = [u'; v']; - # xy = [x'; y']; - # tform_sim=cp2tform(uv,xy,'similarity'); - # - # trans = tform_sim.tdata.T - # ans = - # -0.0764 -1.6190 0 - # 1.6190 -0.0764 0 - # -3.2156 0.0290 1.0000 - # trans_inv = tform_sim.tdata.Tinv - # ans = - # - # -0.0291 0.6163 0 - # -0.6163 -0.0291 0 - # -0.0756 1.9826 1.0000 - # xy_m=tformfwd(tform_sim, u,v) - # - # xy_m = - # - # -3.2156 0.0290 - # 1.1833 -9.9143 - # 5.0323 2.8853 - # uv_m=tforminv(tform_sim, x,y) - # - # uv_m = - # - # 0.5698 1.3953 - # 6.0872 2.2733 - # -2.6570 4.3314 - """ - u = [0, 6, -2] - v = [0, 3, 5] - x = [-1, 0, 4] - y = [-1, -10, 4] - - uv = np.array((u, v)).T - xy = np.array((x, y)).T - - print('\n--->uv:') - print(uv) - print('\n--->xy:') - print(xy) - - trans, trans_inv = get_similarity_transform(uv, xy) - - print('\n--->trans matrix:') - print(trans) - - print('\n--->trans_inv matrix:') - print(trans_inv) - - print('\n---> apply transform to uv') - print('\nxy_m = uv_augmented * trans') - uv_aug = np.hstack((uv, np.ones((uv.shape[0], 1)))) - xy_m = np.dot(uv_aug, trans) - print(xy_m) - - print('\nxy_m = tformfwd(trans, uv)') - xy_m = tformfwd(trans, uv) - print(xy_m) - - print('\n---> apply inverse transform to xy') - print('\nuv_m = xy_augmented * trans_inv') - xy_aug = np.hstack((xy, np.ones((xy.shape[0], 1)))) - uv_m = np.dot(xy_aug, trans_inv) - print(uv_m) - - print('\nuv_m = tformfwd(trans_inv, xy)') - uv_m = tformfwd(trans_inv, xy) - print(uv_m) - - uv_m = tforminv(trans, xy) - print('\nuv_m = tforminv(trans, xy)') - print(uv_m) diff --git a/spaces/shubhamjaiswar/RakshakReet-SpamDetection/app.py b/spaces/shubhamjaiswar/RakshakReet-SpamDetection/app.py deleted file mode 100644 index cd017e56e52fb95ff0f06eeb0b70a137a5e5829a..0000000000000000000000000000000000000000 --- a/spaces/shubhamjaiswar/RakshakReet-SpamDetection/app.py +++ /dev/null @@ -1,25 +0,0 @@ -import gradio as gr -import re -import joblib -p_spam = joblib.load("p_spam.pkl") -p_non_spam = joblib.load("p_non_spam.pkl") -parameters_spam = joblib.load("parameters_spam.pkl") -parameters_non_spam = joblib.load("parameters_non_spam.pkl") -def classify(message): - message = re.sub("\W", " ", message) - message = message.lower().split() - p_spam_given_message = p_spam - p_non_spam_given_message = p_non_spam - for word in message: - if word in parameters_spam: - p_spam_given_message *= parameters_spam[word] - if word in parameters_non_spam: - p_non_spam_given_message *= parameters_non_spam[word] - if p_spam_given_message > p_non_spam_given_message: - return "Spam" - elif p_non_spam_given_message > p_spam_given_message: - return "Non-Spam" - else: - return "Equal probabilities, human needed to classify this!" 
-ir = gr.Interface(classify,inputs="text",outputs="text") -ir.launch() \ No newline at end of file diff --git a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/mandarin.py b/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/mandarin.py deleted file mode 100644 index 093d8826809aa2681f6088174427337a59e0c882..0000000000000000000000000000000000000000 --- a/spaces/simpie28/VITS-Umamusume-voice-synthesizer/text/mandarin.py +++ /dev/null @@ -1,329 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import cn2an -import logging - -logging.getLogger('jieba').setLevel(logging.WARNING) -jieba.initialize() - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (bopomofo, ipa2) pairs: -_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'pwo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 
'p'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'tɕ'), - ('ㄑ', 'tɕʰ'), - ('ㄒ', 'ɕ'), - ('ㄓ', 'tʂ'), - ('ㄔ', 'tʂʰ'), - ('ㄕ', 'ʂ'), - ('ㄖ', 'ɻ'), - ('ㄗ', 'ts'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ɤ'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'yæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'yn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'ɤŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'y'), - ('ˉ', '˥'), - ('ˊ', '˧˥'), - ('ˇ', '˨˩˦'), - ('ˋ', '˥˩'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa2(text): - for regex, replacement in _bopomofo_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i([aoe])', r'y\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i([aoe])', r'j\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_ipa2(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa2(text) - text = re.sub(r'i([aoe])', r'j\1', text) - text = re.sub(r'u([aoəe])', r'w\1', text) - text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) - text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) - return text \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bad 2 Bad Extinction Mod Apk - The Ultimate RPG with All Characters Unlocked.md 
b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bad 2 Bad Extinction Mod Apk - The Ultimate RPG with All Characters Unlocked.md deleted file mode 100644 index e0dde88b71f6025cbbda96f8da2a1dd07bb5e39b..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bad 2 Bad Extinction Mod Apk - The Ultimate RPG with All Characters Unlocked.md +++ /dev/null @@ -1,80 +0,0 @@ -
    -

    Bad 2 Bad Extinction Mod APK Unlock All Characters

    -

If you are a fan of action games with animal heroes, zombies, and mutants, you might want to check out Bad 2 Bad Extinction. This is a thrilling game that will keep you on the edge of your seat as you fight for survival in a post-apocalyptic world. And if you want to make the game even more fun and exciting, you can download the Bad 2 Bad Extinction Mod APK to unlock all characters and skins and get unlimited money and gems. In this article, we will tell you everything you need to know about this modded version of the game, including what it is, how to get it, and why you should try it.

    -

    bad 2 bad extinction mod apk unlock all characters


    DOWNLOAD ››› https://ssurll.com/2uO06D



    -

    What is Bad 2 Bad Extinction?

    -

    Bad 2 Bad Extinction is a sequel to the popular game Bad 2 Bad: Delta, which was released in 2018. It is a side-scrolling action game that features animal heroes who fight against zombies, mutants, and other enemies in a post-apocalyptic world. The game has a unique style and humor that makes it stand out from other games in the genre.

    -

    A thrilling action game with animal heroes

    -

    The game lets you choose from different animal characters, such as bears, wolves, pandas, tigers, and more. Each character has its own skills and abilities that you can use in combat. You can also customize your character with various outfits, accessories, and weapons. The game has a simple control system that allows you to move, shoot, aim, reload, and use items with ease.

    -

    A post-apocalyptic world with zombies and mutants

    -

    The game takes place in a world that has been devastated by a virus that turns humans into zombies and animals into mutants. You have to fight your way through different locations, such as cities, deserts, forests, and underground bases. You will encounter different types of enemies, such as zombies, mutants, bandits, robots, and bosses. You will also have to complete various missions, such as rescuing survivors, collecting resources, destroying enemy bases, and more.

    -

    bad 2 bad extinction mod apk free shopping and characters
    -bad 2 bad extinction hack apk unlimited money and heroes
    -bad 2 bad extinction modded apk download all unlocked
    -bad 2 bad extinction cheat apk latest version and characters
    -bad 2 bad extinction premium apk full access and characters
    -bad 2 bad extinction cracked apk no ads and characters
    -bad 2 bad extinction mod apk android 1 all characters
    -bad 2 bad extinction hack apk rexdl free characters
    -bad 2 bad extinction modded apk revdl all unlocked
    -bad 2 bad extinction cheat apk happymod and characters
    -bad 2 bad extinction premium apk apkpure full access
    -bad 2 bad extinction cracked apk apkmody no ads
    -bad 2 bad extinction mod apk offline all characters
    -bad 2 bad extinction hack apk online free characters
    -bad 2 bad extinction modded apk update all unlocked
    -bad 2 bad extinction cheat apk old version and characters
    -bad 2 bad extinction premium apk new version full access
    -bad 2 bad extinction cracked apk original no ads
    -bad 2 bad extinction mod apk unlimited everything and characters
    -bad 2 bad extinction hack apk mega mod free characters
    -bad 2 bad extinction modded apk god mode all unlocked
    -bad 2 bad extinction cheat apk infinite ammo and characters
    -bad 2 bad extinction premium apk pro version full access
    -bad 2 bad extinction cracked apk vip version no ads
    -bad 2 bad extinction mod apk obb data all characters
    -bad 2 bad extinction hack apk zip file free characters
    -bad 2 bad extinction modded apk xapk file all unlocked
    -bad 2 bad extinction cheat apk no root and characters
    -bad 2 bad extinction premium apk with root full access
    -bad 2 bad extinction cracked apk without root no ads

    -

    A variety of missions, weapons, and customization options

    -

    The game offers a lot of content and features that will keep you entertained for hours. You can play the game in different modes, such as story mode, survival mode, raid mode, and online mode. You can also collect and upgrade different weapons, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. You can also customize your character with different skins, hats, masks, glasses, backpacks, and more. The game has a lot of items and rewards that you can earn by playing the game or by watching ads.

    -

    What is Bad 2 Bad Extinction Mod APK?

    -

    Bad 2 Bad Extinction Mod APK is a modified version of the original game that gives you some extra benefits and features that are not available in the official version. These include:

    -

    A modified version of the original game

    -

    The modded version of the game is created by third-party developers who modify the original game files to add or remove some features. The modded version of the game is not authorized or endorsed by the official developers or publishers of the game. Therefore, you should download and install it at your own risk.

    -

    A way to get unlimited money and gems

    -

    The modded version of the game gives you unlimited money and gems that you can use to buy or upgrade anything in the game. You can also use the money and gems to unlock all the characters and skins that are otherwise locked or require real money to purchase. This way, you can enjoy the game without any limitations or restrictions.

    -

    A way to unlock all characters and skins

    -

    The modded version of the game also gives you access to all the characters and skins that are available in the game. You can choose from different animal heroes, such as bears, wolves, pandas, tigers, and more. You can also customize your character with different outfits, accessories, and weapons. You can change your character's appearance anytime you want and create your own unique style.

    -

    How to download and install Bad 2 Bad Extinction Mod APK?

    -

    If you want to download and install the Bad 2 Bad Extinction Mod APK, you will need to follow some simple steps. Here is how you can do it:

    -

    The steps to follow

    -

    First, you will need to find a reliable source that offers the modded version of the game. You can search online for websites or blogs that provide the download link for the Bad 2 Bad Extinction Mod APK. Make sure that the source is trustworthy and safe, as some sources may contain viruses or malware that can harm your device. You can also check the reviews and ratings of the source before downloading the file.

    -

    Second, you will need to download the file to your device. The file size may vary depending on the source, but it should not be too large. You will need to have enough storage space on your device to save the file. You will also need to have a stable internet connection to download the file without any interruptions.
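Because the modded APK is distributed outside the Play Store, it is worth verifying the downloaded file against a checksum if the site you trust publishes one. The snippet below is only a rough Python sketch; the download URL and the expected SHA-256 value are placeholders that you would replace with the real values from that site.

```python
import hashlib
import requests

# Placeholder values: substitute the real download URL and the SHA-256
# checksum published by the site you actually trust.
APK_URL = "https://example.com/bad2bad-extinction-mod.apk"
EXPECTED_SHA256 = "0000000000000000000000000000000000000000000000000000000000000000"

def download_and_verify(url: str, expected_sha256: str, out_path: str) -> bool:
    """Download the APK in chunks and compare its SHA-256 to the expected value."""
    digest = hashlib.sha256()
    with requests.get(url, stream=True, timeout=60) as response:
        response.raise_for_status()
        with open(out_path, "wb") as apk_file:
            for chunk in response.iter_content(chunk_size=8192):
                apk_file.write(chunk)
                digest.update(chunk)
    return digest.hexdigest().lower() == expected_sha256.lower()

if __name__ == "__main__":
    ok = download_and_verify(APK_URL, EXPECTED_SHA256, "mod.apk")
    print("Checksum matches the published value." if ok
          else "Checksum mismatch: do not install this file.")
```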

    -

    Third, you will need to install the file on your device. To do this, you will need to enable the installation of apps from unknown sources on your device settings. This will allow you to install apps that are not from the official app store. You can find this option under security or privacy settings on your device. Once you enable this option, you can locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.

    -

    The permissions to allow

    -

    During the installation process, you may be asked to allow some permissions for the app to function properly. These permissions may include access to your device's storage, camera, microphone, location, contacts, and more. You can choose to allow or deny these permissions according to your preference. However, some permissions may be necessary for the app to work correctly, so make sure that you do not deny any essential permissions.
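If you want to see which permissions an APK will request before you install it, you can inspect the file on a computer. The sketch below assumes the `aapt` tool from the Android SDK build-tools is on your PATH and that the APK filename is a placeholder; the exact output format of `aapt dump permissions` varies slightly between versions, so the parsing handles both common styles.

```python
import subprocess

APK_PATH = "mod.apk"  # placeholder: path to the downloaded APK

def list_requested_permissions(apk_path: str) -> list[str]:
    """Run `aapt dump permissions` and return the permission names it reports."""
    output = subprocess.run(
        ["aapt", "dump", "permissions", apk_path],
        capture_output=True, text=True, check=True,
    ).stdout
    permissions = []
    for line in output.splitlines():
        line = line.strip()
        if line.startswith("uses-permission"):
            # Newer aapt prints name='...'; older builds print the bare name.
            if "'" in line:
                permissions.append(line.split("'")[1])
            else:
                permissions.append(line.split(":", 1)[1].strip())
    return permissions

if __name__ == "__main__":
    for permission in list_requested_permissions(APK_PATH):
        print(permission)
```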

    -

    The benefits to enjoy

    -

    After installing the app, you can launch it and start playing the game. You will notice that you have unlimited money and gems in your account that you can use to buy or upgrade anything in the game. You will also notice that you have access to all the characters and skins that are available in the game. You can choose any character and skin that you like and customize your character with different outfits, accessories, and weapons. You can also play the game in different modes, such as story mode, survival mode, raid mode, and online mode. You can enjoy the game without any limitations or restrictions.

    -

    Conclusion

    -

    Bad 2 Bad Extinction is a fun and exciting action game that features animal heroes who fight against zombies, mutants, and other enemies in a post-apocalyptic world. The game has a lot of content and features that will keep you entertained for hours. However, if you want to make the game even more fun and exciting, you can download the Bad 2 Bad Extinction Mod APK and unlock all characters, skins, money, and gems. This way, you can enjoy the game without any limitations or restrictions.

    -

    If you are interested in trying out this modded version of the game, you can follow the steps above to download and install it on your device. Make sure that you download it from a reliable source and allow the necessary permissions for it to work properly. Then, you can launch it and start playing it with unlimited money and gems and access to all characters and skins.

    -

    We hope that this article has helped you learn more about Bad 2 Bad Extinction Mod APK Unlock All Characters. If you have any questions or feedback, feel free to leave a comment below.

    -

    FAQs

    -

    Here are some frequently asked questions about Bad 2 Bad Extinction Mod APK Unlock All Characters:

    -
      -
    • Is Bad 2 Bad Extinction Mod APK safe?
    • -

  Bad 2 Bad Extinction Mod APK is safe as long as you download it from a trustworthy source and the file itself does not contain any viruses or malware. However, since it is a modified version of the original game, it is not authorized or endorsed by the official developers or publishers of the game. Therefore, you should download and install it at your own risk and discretion.

      -
    • Does Bad 2 Bad Extinction Mod APK work on all devices?
    • -

      Bad 2 Bad Extinction Mod APK should work on most devices that support the original game. However, some devices may not be compatible with the modded version of the game due to different specifications or settings. Therefore, you should check the compatibility of your device before downloading and installing the modded version of the game.

      -
    • Can I play Bad 2 Bad Extinction Mod APK online with other players?
    • -

      Bad 2 Bad Extinction Mod APK allows you to play online with other players who have the same modded version of the game. However, you may not be able to play online with players who have the official version of the game, as they may have different features and updates. Therefore, you should be aware of this limitation before playing online with other players.

      -
    • Will I get banned for using Bad 2 Bad Extinction Mod APK?
    • -

      There is a possibility that you may get banned for using Bad 2 Bad Extinction Mod APK, as it is a modified version of the original game that gives you an unfair advantage over other players. The official developers or publishers of the game may detect your use of the modded version of the game and take action against you. Therefore, you should use the modded version of the game at your own risk and responsibility.

      -
    • Can I update Bad 2 Bad Extinction Mod APK?
    • -

      Bad 2 Bad Extinction Mod APK may not be compatible with the latest updates and patches of the original game. Therefore, you may not be able to update the modded version of the game without losing its features and benefits. If you want to update the modded version of the game, you will need to find a new source that offers the updated modded version of the game and download and install it again.

      -

    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Recording APK for Android 12 The Ultimate Guide to Record Phone Calls.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Recording APK for Android 12 The Ultimate Guide to Record Phone Calls.md deleted file mode 100644 index 84027a9a0444b420f47b0c38bf8053a2a607ba25..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call Recording APK for Android 12 The Ultimate Guide to Record Phone Calls.md +++ /dev/null @@ -1,165 +0,0 @@ - -

    How to Choose and Install the Best Call Recording Apps for Android 12

    -

    Call recording is a feature that allows you to record your phone conversations and save them as audio files on your device. You can use call recording for various purposes, such as:

    -

    call recording apk for android 12


    Download Zip ✸✸✸ https://ssurll.com/2uNYYQ



    -
      -
    • Keeping records of important information or agreements
    • Improving your customer service or sales skills
    • Complying with legal regulations or industry standards
    • Resolving disputes or complaints
    • Capturing feedback or testimonials
    -

    However, call recording on Android is not as simple as it sounds. Due to privacy concerns, Google has restricted the ability of third-party apps to access the microphone and phone calls on Android devices. This means that many call recording apps may not work properly on Android 12, or may require special permissions or settings to function.

    -

    In this article, we will help you choose and install the best call recording apps for Android 12. We will compare the features, pros, and cons of five popular and reliable call recording apps that are compatible with Android 12. We will also explain how to install APK files from outside the Play Store, how to enable unknown sources and accessibility permissions for call recording apps, how to configure and customize the settings and options of each app, and how to access, manage, and share your recorded calls.

    -

    Best Call Recording Apps for Android 12

    -

    There are many call recording apps available for Android devices, but not all of them are compatible with Android 12. Some apps may only work with certain phone models or regions, while others may require root access or additional plug-ins. Some apps may also have limited features or functionality, such as not being able to record VoIP calls from apps like WhatsApp, Skype, or Facebook Messenger.

    -

    To help you find the best call recording app for your needs, we have selected five popular and reliable call recording apps that work well on Android 12. We have compared their features, pros, and cons based on the following criteria:

    -
      -
    • Compatibility: The app should be compatible with most Android devices running Android 12.
    • Functionality: The app should be able to record both incoming and outgoing calls from any source, including VoIP calls from other apps.
    • Quality: The app should be able to record calls in high-quality audio formats, such as MP3 or WAV.
    • Usability: The app should be easy to use and configure, with a simple and intuitive interface.
    • Privacy: The app should respect your privacy and security, by not collecting or sharing your personal data or recordings without your consent.
    -

    Here are the five best call recording apps for Android 12 that we have chosen:

    -

    ACR Phone +

    -

    ACR Phone + is a powerful call recorder app that offers a lot of features and customization options.

    Some of the features, pros, and cons of ACR Phone + are:

    -
      -
    • High-quality call recording: ACR Phone ensures crystal-clear call recording, capturing every detail of your conversations. You can choose between MP3 or WAV formats, and adjust the bitrate and sampling rate to your preference.
    • -
    • Automatic call recording: With ACR Phone, you don't have to start recording each call manually. You can set up rules to record all calls, or only calls from specific contacts or numbers. You can also exclude contacts or numbers from being recorded.
    • -
    • Easy-to-use interface: ACR Phone is designed with user-friendliness in mind. You can access your recordings from the app's main screen, or from the notification bar. You can also search, sort, filter, and rename your recordings easily.
    • -
    • Organize and share recordings: The app allows you to organize your recorded calls efficiently. You can create folders, add notes, and mark important recordings as favorites. You can also share your recordings via email, cloud services, or social media.
    • -
    • Privacy: The app respects your privacy and security, by not collecting or sharing your personal data or recordings without your consent. You can also protect your recordings with a PIN lock or biometric authentication.
    • -
    • Pros: ACR Phone is a powerful and versatile call recorder app that offers a lot of features and customization options. It supports both cellular and VoIP calls, and works well on Android 12 devices. It also has a free version with some limitations.
    • -
    • Cons: ACR Phone is not available on the Google Play Store, so you have to download it from the developer's website or other sources. This may pose some security risks or compatibility issues. The app also requires accessibility permissions to work properly.
    • -
    -

    Cube ACR

    -

    Cube ACR is another popular call recorder app that works well on Android 12 devices. It can record both cellular and VoIP calls from various apps, such as WhatsApp, Skype, Viber, Telegram, and more.

    -
      -
    • Functionality: Cube ACR can record both incoming and outgoing calls from any source, including VoIP calls from other apps. It supports automatic call recording based on rules, as well as manual call recording with a floating widget .
    • -
    • Quality: Cube ACR can record calls in high-quality audio formats, such as MP3 or OGG. You can also adjust the audio source and quality settings to suit your needs .
    • -
    • Usability: Cube ACR has a simple and intuitive interface that lets you access and manage your recordings easily. You can also search, sort, filter, and backup your recordings .
    • -
    • Privacy: Cube ACR does not collect or share your personal data or recordings without your permission. You can also lock your recordings with a PIN code or fingerprint .
    • -
    • Pros: Cube ACR is one of the few call recorder apps that can record VoIP calls from other apps on Android 12 devices. It has a lot of features and options to customize your recording experience. It also has a free version with some ads and limitations.
    • -
    • Cons: Cube ACR may not work on some devices or regions due to carrier restrictions or compatibility issues. It also requires accessibility permissions and notification access to work properly .
    • -
    -

    CallU

    -

    CallU is a simple and effective call recorder app that works on Android 12 devices. It can record both incoming and outgoing calls automatically or manually.

    -
      -
    • Functionality: CallU can record both incoming and outgoing calls automatically or manually. You can choose to record all calls, or only calls from specific contacts or numbers. You can also exclude contacts or numbers from being recorded.
    • -
    • Quality: CallU can record calls in high-quality audio formats, such as MP3 or WAV. You can also adjust the audio source and quality settings to suit your needs.
    • -
    • Usability: CallU has a simple and easy-to-use interface that lets you access and manage your recordings easily. You can also search, sort, filter, rename, and backup your recordings.
    • -
    • Privacy: CallU does not collect or share your personal data or recordings without your permission. You can also protect your recordings with a password or fingerprint.
    • -
    • Pros: CallU is a simple and effective call recorder app that works well on Android 12 devices. It has a minimalistic design and a user-friendly interface. It also has a free version with some ads and limitations.
    • -
    • Cons: CallU may not work on some devices or regions due to carrier restrictions or compatibility issues. It also requires accessibility permissions and notification access to work properly.
    • -
    -

    TrueCall

    -

    TrueCall is a smart and reliable call recorder app that works on Android 12 devices. It can record both incoming and outgoing calls automatically or manually, and also identify unknown callers and block spam calls.

    -

    best call recording app for android 12
    -how to record phone calls on android 12
    -android 12 call recorder apk download
    -call recording software for android 12
    -free call recorder for android 12
    -automatic call recorder for android 12
    -hidden call recorder for android 12
    -call recording settings in android 12
    -voice call recorder for android 12
    -call recording feature in android 12
    -call recording option in android 12
    -call recording permission in android 12
    -call recording problem in android 12
    -call recording solution for android 12
    -call recording support for android 12
    -call recording quality on android 12
    -call recording backup on android 12
    -call recording cloud storage for android 12
    -call recording encryption for android 12
    -call recording security for android 12
    -call recording privacy for android 12
    -call recording laws for android 12
    -call recording tips and tricks for android 12
    -call recording reviews for android 12
    -call recording comparison for android 12
    -best call recording apps for android 2023
    -top call recording apps for android 2023
    -new call recording apps for android 2023
    -latest call recording apps for android 2023
    -upcoming call recording apps for android 2023
    -best rated call recording apps for android 2023
    -most downloaded call recording apps for android 2023
    -most popular call recording apps for android 2023
    -most reliable call recording apps for android 2023
    -most secure call recording apps for android 2023
    -best free call recording apps for android 2023
    -best paid call recording apps for android 2023
    -best offline call recording apps for android 2023
    -best online call recording apps for android 2023
    -best cloud-based call recording apps for android 2023
    -best encrypted call recording apps for android 2023
    -best hidden call recording apps for android 2023
    -best automatic call recording apps for android 2023
    -best manual call recording apps for android 2023
    -best voice quality call recording apps for android 2023
    -best backup and restore call recording apps for android 2023
    -best easy to use call recording apps for android 2023
    -best customizable call recording apps for android 2023
    -best advanced call recording apps for android 2023

    -
      -
    • Functionality: TrueCall can record both incoming and outgoing calls automatically or manually. You can choose to record all calls, or only calls from specific contacts or numbers. You can also exclude contacts or numbers from being recorded.
    • -
    • Quality: TrueCall can record calls in high-quality audio formats, such as MP3 or WAV. You can also adjust the audio source and quality settings to suit your needs.
    • -
    • Usability: TrueCall has a smart and intuitive interface that lets you access and manage your recordings easily. You can also search, sort, filter, rename, and backup your recordings.
    • -
    • Privacy: TrueCall does not collect or share your personal data or recordings without your permission. You can also protect your recordings with a password or fingerprint.
    • -
    • Pros: TrueCall is a smart and reliable call recorder app that works well on Android 12 devices. It has a lot of features and options to customize your recording experience. It also has a caller ID feature that can identify unknown callers and block spam calls. It also has a free version with some ads and limitations.
    • -
    • Cons: TrueCall may not work on some devices or regions due to carrier restrictions or compatibility issues. It also requires accessibility permissions and notification access to work properly.
    • -
    -

    Your stock phone dialer

    -

    Your stock phone dialer is the default phone app that comes pre-installed on your Android device. Depending on your device model and manufacturer, it may have a built-in call recording feature that works on Android 12 devices.

    -
      -
    • Functionality: Your stock phone dialer may have a call recording feature that allows you to record both incoming and outgoing calls manually. You may see a record button on the call screen, or you may have to tap the menu button to access the option .
    • -
    • Quality: Your stock phone dialer may record calls in high-quality audio formats, such as MP3 or WAV. However, you may not be able to adjust the audio source and quality settings to suit your needs .
    • -
    • Usability: Your stock phone dialer may have a simple and easy-to-use interface that lets you access and manage your recordings easily. However, you may not be able to search, sort, filter, rename, or backup your recordings .
    • -
    • Privacy: Your stock phone dialer may respect your privacy and security, by not collecting or sharing your personal data or recordings without your consent. However, you may not be able to protect your recordings with a password or fingerprint .
    • -
    • Pros: Your stock phone dialer is the easiest and most convenient way to record calls on Android 12 devices. You don't have to install any third-party apps or grant any permissions. You also don't have to worry about compatibility issues or carrier restrictions.
    • -
    • Cons: Your stock phone dialer may not have a call recording feature at all, depending on your device model and manufacturer. Even if it does, it may have limited features and functionality, such as not being able to record VoIP calls from other apps, or not being able to customize your recording experience.
    • -
    -

    How to Install and Use Call Recording Apps on Android 12

    -

    If you decide to use one of the third-party call recording apps mentioned above, you will need to install them from outside the Google Play Store, as they are not available there due to Google's policies. This means that you will need to download APK files from the developer's website or other sources, and install them manually on your device.

    -

    To install APK files from outside the Play Store, you will need to follow these steps:

    -
      -
    1. Download the APK file of the call recording app of your choice from the developer's website or other sources. Make sure that the source is trustworthy and secure, as some APK files may contain malware or viruses.
    2. Go to the Settings app on your device, and tap on Security or Privacy. Then, enable the option to allow installation of apps from unknown sources. This will allow you to install APK files that are not from the Play Store.
    3. Locate the APK file that you downloaded on your device, and tap on it to start the installation process. You may see a warning message that the app may harm your device, but you can ignore it if you trust the source.
    4. Follow the instructions on the screen to complete the installation process. You may have to grant some permissions or access to the app, such as phone, contacts, microphone, storage, etc.
    5. Once the installation is done, you can launch the app and start using it to record your calls.
    -
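If you prefer to sideload the APK from a computer instead of browsing to the file on the phone, the same installation can be driven over adb. This is only a sketch: it assumes the Android platform tools (`adb`) are installed, USB debugging is enabled on the phone, and the APK filename is a placeholder.

```python
import subprocess
import sys

APK_PATH = "call-recorder.apk"  # placeholder: path to the APK you downloaded

def sideload(apk_path: str) -> None:
    """Install (or reinstall with -r) the APK on the connected device via adb."""
    subprocess.run(["adb", "devices"], check=True)           # confirm a device is attached
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    try:
        sideload(APK_PATH)
        print("Install command finished; check the device screen for any prompts.")
    except subprocess.CalledProcessError as error:
        sys.exit(f"adb reported an error: {error}")
```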

    To use call recording apps on Android 12, you will need to follow these steps:

    -
      -
    1. Enable accessibility permissions for the call recording app. This is necessary for the app to access your phone calls and microphone. To do this, go to the Settings app on your device, and tap on Accessibility. Then, find the call recording app and turn on its accessibility service.
    2. Enable notification access for the call recording app. This is necessary for the app to detect and record incoming and outgoing calls. To do this, go to the Settings app on your device, and tap on Notifications. Then, find the call recording app and turn on its notification access.
    3. Configure and customize the settings and options of the call recording app. You can choose whether to record all calls or only specific ones, whether to record in MP3 or WAV format, whether to adjust the audio quality and source, whether to add notes or folders to your recordings, etc.
    4. Access, manage, and share your recorded calls. You can view your recorded calls from the app's main screen or from the notification bar. You can also search, sort, filter, rename, or delete your recordings. You can also share your recordings via email, cloud services, or social media.
    -

    Conclusion

    -

    Call recording is a useful feature that can help you keep track of your conversations, improve your customer service, comply with legal regulations, and more. However, call recording on Android 12 is not as simple as it sounds. You need to choose a compatible and reliable call recording app that works well on Android 12 devices. You also need to install it from outside the Play Store, and enable some permissions and settings for it to work properly.

    -

    In this article, we have provided you with an outline and a detailed guide on how to choose and install the best call recording apps for Android 12. We have compared the features, pros, and cons of five popular and reliable call recording apps that are compatible with Android 12: ACR Phone +, Cube ACR, CallU, TrueCall, and your stock phone dialer. We have also explained how to install APK files from outside the Play Store, how to enable unknown sources and accessibility permissions for call recording apps, how to configure and customize the settings and options of each app, and how to access, manage, and share your recorded calls.

    -

    We hope that this article has helped you find the best call recording app for your needs. We invite you to try out these apps and share your feedback with us. Do you have any questions or suggestions about call recording apps for Android 12? Let us know in the comments below!

    -

    FAQs

    -

    Here are some common questions and answers about call recording apps for Android 12:

    -

    Is call recording legal?

    -

    The legality of call recording depends on various factors, such as your location, the purpose of recording, and whether you have consent from the other party or parties involved in the call. In general, you should always inform and obtain consent from the other party or parties before recording a call. You should also check your local laws and regulations before using any call recording app.

    -

    How can I record VoIP calls from other apps?

    -

      Some call recording apps, such as ACR Phone + and Cube ACR, can record VoIP calls from other apps like WhatsApp, Skype, Viber, and Telegram. However, not every call recording app can do this, and some require additional plug-ins or settings to enable the feature. You can check each app's website or FAQ for details on how to enable and use it.

    -

    How can I backup or restore my recorded calls?

    -

    Most call recording apps allow you to backup or restore your recorded calls to or from your device's internal storage, external SD card, or cloud services, such as Google Drive, Dropbox, OneDrive, etc. You can usually find this option in the app's settings or menu. You can also use a file manager app to copy or move your recorded calls to or from your desired location.
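As a plain-file alternative to the in-app backup option, you can copy the recording files yourself once they are visible on a computer or in the phone's shared storage. The folder paths in the sketch below are placeholders, since each recorder app stores its files in its own location.

```python
import shutil
from pathlib import Path

# Placeholder paths: point these at the folder your recorder app actually uses
# and at wherever you want the backup to live.
SOURCE_DIR = Path("CallRecordings")
BACKUP_DIR = Path("CallRecordings_backup")

def backup_recordings(source: Path, backup: Path) -> int:
    """Copy any audio files not already present in the backup folder."""
    backup.mkdir(parents=True, exist_ok=True)
    copied = 0
    for recording in source.glob("*"):
        if recording.suffix.lower() in {".mp3", ".wav", ".ogg"}:
            target = backup / recording.name
            if not target.exists():
                shutil.copy2(recording, target)
                copied += 1
    return copied

if __name__ == "__main__":
    print(f"Copied {backup_recordings(SOURCE_DIR, BACKUP_DIR)} new recording(s).")
```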

    -

    How can I delete or hide my recorded calls?

    -

    Most call recording apps allow you to delete or hide your recorded calls from the app's main screen or menu. You can usually select one or more recordings and tap on the delete or hide option. You can also use a file manager app to delete or hide your recorded calls from your device's storage. However, be careful when deleting or hiding your recorded calls, as you may not be able to recover them later.

    -

    How can I improve the quality of my recorded calls?

    -

    The quality of your recorded calls may depend on various factors, such as your device model, network signal, audio source, audio format, audio quality settings, etc. To improve the quality of your recorded calls, you can try the following tips:

    -
      -
    • Use a good-quality microphone and headset for your calls.
    • Avoid noisy environments and background noises when making or receiving calls.
    • Choose a high-quality audio format and bitrate for your recordings, such as MP3 or WAV (see the conversion sketch after this list).
    • Adjust the audio source and quality settings in your call recording app to suit your needs.
    • Test different call recording apps and see which one works best for you.
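If you end up with a recording in an inconvenient format or at a larger size than you need, converting it afterwards is straightforward; note that re-encoding cannot add detail that was never captured. The sketch below assumes `ffmpeg` is installed and the filenames are placeholders.

```python
import subprocess

INPUT_FILE = "recording.wav"   # placeholder: the file your recorder app produced
OUTPUT_FILE = "recording.mp3"  # placeholder: the converted copy

def convert_to_mp3(src: str, dst: str, bitrate: str = "192k") -> None:
    """Re-encode a recording to MP3 at the given audio bitrate using ffmpeg."""
    subprocess.run(
        ["ffmpeg", "-y", "-i", src, "-b:a", bitrate, dst],
        check=True,
    )

if __name__ == "__main__":
    convert_to_mp3(INPUT_FILE, OUTPUT_FILE)
    print(f"Wrote {OUTPUT_FILE}")
```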

    -
    -
    \ No newline at end of file diff --git a/spaces/simplyjaga/neural_style_tranfer_using_dense_net/app.py b/spaces/simplyjaga/neural_style_tranfer_using_dense_net/app.py deleted file mode 100644 index 1136b9f2c22692c4b117fbdad0bc22a8e8eba20e..0000000000000000000000000000000000000000 --- a/spaces/simplyjaga/neural_style_tranfer_using_dense_net/app.py +++ /dev/null @@ -1,27 +0,0 @@ -import model -import gradio as gr - -#gui -demo = gr.Blocks() -with demo: - gr.Markdown( - """# Neural Style Transfer Using DenseNet - Since running this demo takes too much time without gpu, copy and try it out in the colab with gpu option for nearly 1000 steps to get an pretty decent output - """) - - with gr.Row(): - with gr.Column(): - input =[gr.Image(label='Style Image', type='pil'), - gr.Image(label='Content Image', type='pil'), - gr.Slider(0, 1, value=1, label='Alpha (amout of info from content image)'), - gr.Slider(0, 1, value=0.02, label='Beta (amout of style from style image)'), - gr.Number(label='Step (no.of generation updates) - keep it below 20 because it takes too much time without gpu')] - with gr.Column(): - output = gr.Image(label='Image after Style Transfer') - gr.Examples([['examples/style.jpg','examples/content.jpg']], - inputs=input) - - btn = gr.Button("Transfer Style") - btn.click(fn=model.get_output, inputs=input, outputs=output) - -demo.queue().launch() \ No newline at end of file diff --git a/spaces/skytnt/moe-tts/text/__init__.py b/spaces/skytnt/moe-tts/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/skytnt/moe-tts/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/songweig/rich-text-to-image/app_sd.py b/spaces/songweig/rich-text-to-image/app_sd.py deleted file mode 100644 index 2da9eb6bae6d960dea39e6397294cd51651f91a9..0000000000000000000000000000000000000000 --- a/spaces/songweig/rich-text-to-image/app_sd.py +++ /dev/null @@ -1,557 +0,0 @@ -import math -import random -import os -import json -import time -import argparse -import torch -import numpy as np -from torchvision import transforms - -from models.region_diffusion import RegionDiffusion -from utils.attention_utils import get_token_maps -from utils.richtext_utils import seed_everything, parse_json, get_region_diffusion_input,\ - get_attention_control_input, get_gradient_guidance_input - - -import gradio as gr -from PIL import Image, ImageOps -from share_btn import community_icon_html, loading_icon_html, share_js, css - - -help_text = """ -If you are encountering an error or not achieving your desired outcome, here are some potential reasons and recommendations to consider: -1. If you format only a portion of a word rather than the complete word, an error may occur. -2. If you use font color and get completely corrupted results, you may consider decrease the color weight lambda. -3. Consider using a different seed. -""" - - -canvas_html = """""" -get_js_data = """ -async (text_input, negative_prompt, height, width, seed, steps, num_segments, segment_threshold, inject_interval, guidance_weight, color_guidance_weight, rich_text_input, background_aug) => { - const richEl = document.getElementById("rich-text-root"); - const data = richEl? richEl.contentDocument.body._data : {}; - return [text_input, negative_prompt, height, width, seed, steps, num_segments, segment_threshold, inject_interval, guidance_weight, color_guidance_weight, JSON.stringify(data), background_aug]; -} -""" -set_js_data = """ -async (text_input) => { - const richEl = document.getElementById("rich-text-root"); - const data = text_input ? 
JSON.parse(text_input) : null; - if (richEl && data) richEl.contentDocument.body.setQuillContents(data); -} -""" - -get_window_url_params = """ -async (url_params) => { - const params = new URLSearchParams(window.location.search); - url_params = Object.fromEntries(params); - return [url_params]; -} -""" - - -def load_url_params(url_params): - if 'prompt' in url_params: - return gr.update(visible=True), url_params - else: - return gr.update(visible=False), url_params - - -def main(): - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - model = RegionDiffusion(device) - - def generate( - text_input: str, - negative_text: str, - height: int, - width: int, - seed: int, - steps: int, - num_segments: int, - segment_threshold: float, - inject_interval: float, - guidance_weight: float, - color_guidance_weight: float, - rich_text_input: str, - background_aug: bool, - ): - run_dir = 'results/' - os.makedirs(run_dir, exist_ok=True) - # Load region diffusion model. - height = int(height) - width = int(width) - steps = 41 if not steps else steps - guidance_weight = 8.5 if not guidance_weight else guidance_weight - text_input = rich_text_input if rich_text_input != '' else text_input - print('text_input', text_input) - if (text_input == '' or rich_text_input == ''): - raise gr.Error("Please enter some text.") - # parse json to span attributes - base_text_prompt, style_text_prompts, footnote_text_prompts, footnote_target_tokens,\ - color_text_prompts, color_names, color_rgbs, size_text_prompts_and_sizes, use_grad_guidance = parse_json( - json.loads(text_input)) - - # create control input for region diffusion - region_text_prompts, region_target_token_ids, base_tokens = get_region_diffusion_input( - model, base_text_prompt, style_text_prompts, footnote_text_prompts, - footnote_target_tokens, color_text_prompts, color_names) - - # create control input for cross attention - text_format_dict = get_attention_control_input( - model, base_tokens, size_text_prompts_and_sizes) - - # create control input for region guidance - text_format_dict, color_target_token_ids = get_gradient_guidance_input( - model, base_tokens, color_text_prompts, color_rgbs, text_format_dict, color_guidance_weight=color_guidance_weight) - - seed_everything(seed) - - # get token maps from plain text to image generation. 
- begin_time = time.time() - if model.selfattn_maps is None and model.crossattn_maps is None: - model.remove_tokenmap_hooks() - model.register_tokenmap_hooks() - else: - model.reset_attention_maps() - model.remove_tokenmap_hooks() - plain_img = model.produce_attn_maps([base_text_prompt], [negative_text], - height=height, width=width, num_inference_steps=steps, - guidance_scale=guidance_weight) - print('time lapses to get attention maps: %.4f' % - (time.time()-begin_time)) - seed_everything(seed) - color_obj_masks, segments_vis, token_maps = get_token_maps(model.selfattn_maps, model.crossattn_maps, model.n_maps, run_dir, - 512//8, 512//8, color_target_token_ids[:-1], seed, - base_tokens, segment_threshold=segment_threshold, num_segments=num_segments, - return_vis=True) - seed_everything(seed) - model.masks, segments_vis, token_maps = get_token_maps(model.selfattn_maps, model.crossattn_maps, model.n_maps, run_dir, - 512//8, 512//8, region_target_token_ids[:-1], seed, - base_tokens, segment_threshold=segment_threshold, num_segments=num_segments, - return_vis=True) - color_obj_masks = [transforms.functional.resize(color_obj_mask, (height, width), - interpolation=transforms.InterpolationMode.BICUBIC, - antialias=True) - for color_obj_mask in color_obj_masks] - text_format_dict['color_obj_atten'] = color_obj_masks - model.remove_tokenmap_hooks() - - # generate image from rich text - begin_time = time.time() - seed_everything(seed) - if background_aug: - bg_aug_end = 500 - else: - bg_aug_end = 1000 - rich_img = model.prompt_to_img(region_text_prompts, [negative_text], - height=height, width=width, num_inference_steps=steps, - guidance_scale=guidance_weight, use_guidance=use_grad_guidance, - text_format_dict=text_format_dict, inject_selfattn=inject_interval, - bg_aug_end=bg_aug_end) - print('time lapses to generate image from rich text: %.4f' % - (time.time()-begin_time)) - return [plain_img[0], rich_img[0], segments_vis, token_maps] - - with gr.Blocks(css=css) as demo: - url_params = gr.JSON({}, visible=False, label="URL Params") - gr.HTML("""

    Expressive Text-to-Image Generation with Rich Text

    -

    Songwei Ge, Taesung Park, Jun-Yan Zhu, Jia-Bin Huang

    -

    UMD, Adobe, CMU

    -

    Duplicate Space | [Website] | [Code] | [Paper]

    -

    For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.""") - with gr.Row(): - with gr.Column(): - rich_text_el = gr.HTML(canvas_html, elem_id="canvas_html") - rich_text_input = gr.Textbox(value="", visible=False) - text_input = gr.Textbox( - label='Rich-text JSON Input', - visible=False, - max_lines=1, - placeholder='Example: \'{"ops":[{"insert":"a Gothic "},{"attributes":{"color":"#b26b00"},"insert":"church"},{"insert":" in a the sunset with a beautiful landscape in the background.\n"}]}\'', - elem_id="text_input" - ) - negative_prompt = gr.Textbox( - label='Negative Prompt', - max_lines=1, - placeholder='Example: poor quality, blurry, dark, low resolution, low quality, worst quality', - elem_id="negative_prompt" - ) - segment_threshold = gr.Slider(label='Token map threshold', - info='(See less area in token maps? Decrease this. See too much area? Increase this.)', - minimum=0, - maximum=1, - step=0.01, - value=0.25) - inject_interval = gr.Slider(label='Detail preservation', - info='(To preserve more structure from plain-text generation, increase this. To see more rich-text attributes, decrease this.)', - minimum=0, - maximum=1, - step=0.01, - value=0.) - color_guidance_weight = gr.Slider(label='Color weight', - info='(To obtain more precise color, increase this, while too large value may cause artifacts.)', - minimum=0, - maximum=2, - step=0.1, - value=0.5) - num_segments = gr.Slider(label='Number of segments', - minimum=2, - maximum=20, - step=1, - value=9) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=100000, - step=1, - value=6, - elem_id="seed" - ) - background_aug = gr.Checkbox( - label='Precise region alignment', - info='(For strict region alignment, select this option, but beware of potential artifacts when using with style.)', - value=True) - with gr.Accordion('Other Parameters', open=False): - steps = gr.Slider(label='Number of Steps', - minimum=0, - maximum=500, - step=1, - value=41) - guidance_weight = gr.Slider(label='CFG weight', - minimum=0, - maximum=50, - step=0.1, - value=8.5) - width = gr.Dropdown(choices=[512], - value=512, - label='Width', - visible=True) - height = gr.Dropdown(choices=[512], - value=512, - label='height', - visible=True) - - with gr.Row(): - with gr.Column(scale=1, min_width=100): - generate_button = gr.Button("Generate") - load_params_button = gr.Button( - "Load from URL Params", visible=True) - with gr.Column(): - richtext_result = gr.Image( - label='Rich-text', elem_id="rich-text-image") - richtext_result.style(height=512) - with gr.Row(): - plaintext_result = gr.Image( - label='Plain-text', elem_id="plain-text-image") - segments = gr.Image(label='Segmentation') - with gr.Row(): - token_map = gr.Image(label='Token Maps') - with gr.Row(visible=False) as share_row: - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button( - "Share to community", elem_id="share-btn") - share_button.click(None, [], [], _js=share_js) - with gr.Row(): - gr.Markdown(help_text) - - with gr.Row(): - footnote_examples = [ - [ - '{"ops":[{"insert":"A close-up 4k dslr photo of a "},{"attributes":{"link":"A cat wearing sunglasses and a bandana around its neck."},"insert":"cat"},{"insert":" riding a scooter. 
Palm trees in the background."}]}', - '', - 5, - 0.3, - 0, - 6, - 1, - None, - True - ], - [ - '{"ops":[{"insert":"A "},{"attributes":{"link":"kitchen island with a stove with gas burners and a built-in oven "},"insert":"kitchen island"},{"insert":" next to a "},{"attributes":{"link":"an open refrigerator stocked with fresh produce, dairy products, and beverages. "},"insert":"refrigerator"},{"insert":", by James McDonald and Joarc Architects, home, interior, octane render, deviantart, cinematic, key art, hyperrealism, sun light, sunrays, canon eos c 300, ƒ 1.8, 35 mm, 8k, medium - format print"}]}', - '', - 6, - 0.5, - 0, - 6, - 1, - None, - True - ], - [ - '{"ops":[{"insert":"A "},{"attributes":{"link":"Happy Kung fu panda art, elder, asian art, volumetric lighting, dramatic scene, ultra detailed, realism, chinese"},"insert":"panda"},{"insert":" standing on a cliff by a waterfall, wildlife photography, photograph, high quality, wildlife, f 1.8, soft focus, 8k, national geographic, award - winning photograph by nick nichols"}]}', - '', - 4, - 0.3, - 0, - 4, - 1, - None, - True - ], - ] - - gr.Examples(examples=footnote_examples, - label='Footnote examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - seed, - color_guidance_weight, - rich_text_input, - background_aug, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - # cache_examples=True, - examples_per_page=20) - with gr.Row(): - color_examples = [ - [ - '{"ops":[{"insert":"a beautifule girl with big eye, skin, and long "},{"attributes":{"color":"#00ffff"},"insert":"hair"},{"insert":", t-shirt, bursting with vivid color, intricate, elegant, highly detailed, photorealistic, digital painting, artstation, illustration, concept art."}]}', - 'lowres, had anatomy, bad hands, cropped, worst quality', - 9, - 0.25, - 0.3, - 6, - 0.5, - None, - True - ], - [ - '{"ops":[{"insert":"a beautifule girl with big eye, skin, and long "},{"attributes":{"color":"#eeeeee"},"insert":"hair"},{"insert":", t-shirt, bursting with vivid color, intricate, elegant, highly detailed, photorealistic, digital painting, artstation, illustration, concept art."}]}', - 'lowres, had anatomy, bad hands, cropped, worst quality', - 9, - 0.25, - 0.3, - 6, - 0.1, - None, - True - ], - [ - '{"ops":[{"insert":"a Gothic "},{"attributes":{"color":"#FD6C9E"},"insert":"church"},{"insert":" in a the sunset with a beautiful landscape in the background."}]}', - '', - 5, - 0.3, - 0.5, - 6, - 0.5, - None, - False - ], - [ - '{"ops":[{"insert":"A mesmerizing sight that captures the beauty of a "},{"attributes":{"color":"#4775fc"},"insert":"rose"},{"insert":" blooming, close up"}]}', - '', - 3, - 0.3, - 0, - 9, - 1, - None, - False - ], - [ - '{"ops":[{"insert":"A "},{"attributes":{"color":"#FFD700"},"insert":"marble statue of a wolf\'s head and shoulder"},{"insert":", surrounded by colorful flowers michelangelo, detailed, intricate, full of color, led lighting, trending on artstation, 4 k, hyperrealistic, 3 5 mm, focused, extreme details, unreal engine 5, masterpiece "}]}', - '', - 5, - 0.3, - 0, - 5, - 0.6, - None, - False - ], - ] - gr.Examples(examples=color_examples, - label='Font color examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - seed, - color_guidance_weight, - rich_text_input, - background_aug, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - # 
cache_examples=True, - examples_per_page=20) - - with gr.Row(): - style_examples = [ - [ - '{"ops":[{"insert":"a "},{"attributes":{"font":"mirza"},"insert":"beautiful garden"},{"insert":" with a "},{"attributes":{"font":"roboto"},"insert":"snow mountain in the background"},{"insert":""}]}', - '', - 10, - 0.45, - 0, - 0.2, - 3, - 0.5, - None, - False - ], - [ - '{"ops":[{"attributes":{"link":"the awe-inspiring sky and ocean in the style of J.M.W. Turner"},"insert":"the awe-inspiring sky and sea"},{"insert":" by "},{"attributes":{"font":"mirza"},"insert":"a coast with flowers and grasses in spring"}]}', - 'worst quality, dark, poor quality', - 2, - 0.45, - 0, - 9, - 0.5, - None, - False - ], - [ - '{"ops":[{"insert":"a "},{"attributes":{"font":"slabo"},"insert":"night sky filled with stars"},{"insert":" above a "},{"attributes":{"font":"roboto"},"insert":"turbulent sea with giant waves"}]}', - '', - 2, - 0.45, - 0, - 0, - 6, - 0.5, - None, - False - ], - ] - gr.Examples(examples=style_examples, - label='Font style examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - seed, - color_guidance_weight, - rich_text_input, - background_aug, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - # cache_examples=True, - examples_per_page=20) - - with gr.Row(): - size_examples = [ - [ - '{"ops": [{"insert": "A pizza with "}, {"attributes": {"size": "60px"}, "insert": "pineapple"}, {"insert": ", pepperoni, and mushroom on the top, 4k, photorealistic"}]}', - 'blurry, art, painting, rendering, drawing, sketch, ugly, duplicate, morbid, mutilated, mutated, deformed, disfigured low quality, worst quality', - 5, - 0.3, - 0, - 13, - 1, - None, - False - ], - [ - '{"ops": [{"insert": "A pizza with pineapple, "}, {"attributes": {"size": "20px"}, "insert": "pepperoni"}, {"insert": ", and mushroom on the top, 4k, photorealistic"}]}', - 'blurry, art, painting, rendering, drawing, sketch, ugly, duplicate, morbid, mutilated, mutated, deformed, disfigured low quality, worst quality', - 5, - 0.3, - 0, - 13, - 1, - None, - False - ], - [ - '{"ops": [{"insert": "A pizza with pineapple, pepperoni, and "}, {"attributes": {"size": "70px"}, "insert": "mushroom"}, {"insert": " on the top, 4k, photorealistic"}]}', - 'blurry, art, painting, rendering, drawing, sketch, ugly, duplicate, morbid, mutilated, mutated, deformed, disfigured low quality, worst quality', - 5, - 0.3, - 0, - 13, - 1, - None, - False - ], - ] - gr.Examples(examples=size_examples, - label='Font size examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - seed, - color_guidance_weight, - rich_text_input, - background_aug, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - # cache_examples=True, - examples_per_page=20) - generate_button.click(fn=lambda: gr.update(visible=False), inputs=None, outputs=share_row, queue=False).then( - fn=generate, - inputs=[ - text_input, - negative_prompt, - height, - width, - seed, - steps, - num_segments, - segment_threshold, - inject_interval, - guidance_weight, - color_guidance_weight, - rich_text_input, - background_aug - ], - outputs=[plaintext_result, richtext_result, segments, token_map], - _js=get_js_data - ).then( - fn=lambda: gr.update(visible=True), inputs=None, outputs=share_row, queue=False) - text_input.change( - fn=None, inputs=[text_input], outputs=None, _js=set_js_data, queue=False) - # load url 
param prompt to textinput - load_params_button.click(fn=lambda x: x['prompt'], inputs=[ - url_params], outputs=[text_input], queue=False) - demo.load( - fn=load_url_params, - inputs=[url_params], - outputs=[load_params_button, url_params], - _js=get_window_url_params - ) - demo.queue(concurrency_count=1) - demo.launch(share=False) - - -if __name__ == "__main__": - main() diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/truncated_bptt/transformer_xl_model.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/truncated_bptt/transformer_xl_model.py deleted file mode 100644 index a6c8b25a07276c2ee30c0aa5f0e4b0a2837ed5ca..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/truncated_bptt/transformer_xl_model.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from dataclasses import dataclass, field -from typing import Dict, List, Optional - -import torch -from fairseq.dataclass import FairseqDataclass -from fairseq.models import ( - FairseqIncrementalDecoder, - FairseqLanguageModel, - register_model, -) -from fairseq.modules.checkpoint_activations import checkpoint_wrapper -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class TransformerXLConfig(FairseqDataclass): - # defaults come from the original Transformer-XL code - cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000]) - d_model: int = 500 - n_head: int = 10 - d_head: int = 50 - d_inner: int = 1000 - div_val: int = 1 - n_layer: int = 12 - mem_len: int = 0 - clamp_len: int = -1 - same_length: bool = False - dropout: float = 0.0 - dropatt: float = 0.0 - checkpoint_activations: bool = False - offload_activations: bool = False - max_target_positions: int = II("task.max_target_positions") - - -@register_model("transformer_xl", dataclass=TransformerXLConfig) -class TransformerXLLanguageModel(FairseqLanguageModel): - @classmethod - def build_model(cls, cfg: TransformerXLConfig, task): - return cls(TransformerXLDecoder(cfg, task)) - - -class TransformerXLDecoder(FairseqIncrementalDecoder): - def __init__(self, cfg, task): - try: - from transformers.models.transfo_xl import ( - TransfoXLConfig, - TransfoXLLMHeadModel, - ) - except ImportError: - from transformers.configuration_transfo_xl import TransfoXLConfig - from transformers.modeling_transfo_xl import TransfoXLLMHeadModel - - super().__init__(task.target_dictionary) - self.cfg = cfg - - # remove any cutoffs larger than the vocab size - cutoffs = [ - cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary) - ] - - config = TransfoXLConfig( - vocab_size=len(task.target_dictionary), - cutoffs=cutoffs, - d_model=cfg.d_model, - d_embed=cfg.d_model, - n_head=cfg.n_head, - d_head=cfg.d_head, - d_inner=cfg.d_inner, - div_val=cfg.div_val, - n_layer=cfg.n_layer, - mem_len=cfg.mem_len, - clamp_len=cfg.clamp_len, - same_length=cfg.same_length, - dropout=cfg.dropout, - dropatt=cfg.dropatt, - ) - logger.info(config) - self.model = TransfoXLLMHeadModel(config) - - # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax`` - # which adds ``None`` values to an ``nn.ParameterList``, which is not - # supported in PyTorch. Instead we can replace this with an - # ``nn.ModuleList``, which does support ``None`` values. 
- try: - if all(p is None for p in self.model.crit.out_projs._parameters.values()): - self.model.crit.out_projs = torch.nn.ModuleList( - [None] * len(self.model.crit.out_projs._parameters) - ) - except Exception: - pass - - if cfg.checkpoint_activations or cfg.offload_activations: - for i in range(len(self.model.transformer.layers)): - self.model.transformer.layers[i] = checkpoint_wrapper( - self.model.transformer.layers[i], - offload_to_cpu=cfg.offload_activations, - ) - # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3]) - - self._mems = None - - def forward( - self, - src_tokens, - src_lengths=None, # unused - incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, - encoder_out=None, - ): - if incremental_state is not None: # used during inference - mems = self.get_incremental_state(incremental_state, "mems") - src_tokens = src_tokens[:, -1:] # only keep the most recent token - else: - mems = self._mems - - output = self.model( - input_ids=src_tokens, - mems=mems, - return_dict=False, - ) - - if len(output) >= 2: - if incremental_state is not None: - self.set_incremental_state(incremental_state, "mems", output[1]) - else: - self._mems = output[1] - - return (output[0],) - - def max_positions(self): - return self.cfg.max_target_positions - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], - new_order: torch.Tensor, - ): - """Reorder incremental state. - - This will be called when the order of the input has changed from the - previous time step. A typical use case is beam search, where the input - order changes between time steps based on the selection of beams. - """ - mems = self.get_incremental_state(incremental_state, "mems") - if mems is not None: - new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] - self.set_incremental_state(incremental_state, "mems", new_mems) diff --git a/spaces/stomexserde/gpt4-ui/Examples/Fallout 4 Map Symbol Key.md b/spaces/stomexserde/gpt4-ui/Examples/Fallout 4 Map Symbol Key.md deleted file mode 100644 index b914cf4e3e98da71bdcb080399a9424b816aa2db..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Fallout 4 Map Symbol Key.md +++ /dev/null @@ -1,39 +0,0 @@ -
    -

    How to Understand and Customize the Fallout 4 Map Symbols

    -

    Fallout 4 is a vast open-world game that offers many hours of exploration and combat. One of the most useful features of the game is the map, which shows you the locations of various places, items, enemies, and quests. However, the map can also be confusing and overwhelming at first, especially if you don't know what the different symbols mean.

    -

    In this article, we will explain the meaning of some of the most common map symbols in Fallout 4, and how you can customize them to suit your preferences. We will also provide some tips on how to use the map effectively and find what you are looking for.

    -

    Fallout 4 Map Symbol Key


    DOWNLOAD ○○○ https://urlgoal.com/2uI9sN



    -

    What do the map symbols mean in Fallout 4?

    -

    The map symbols in Fallout 4 are divided into three categories: collectibles, pickups, and locations. Each category has a different color and shape to help you distinguish them.

    -
      -
• Collectibles are items that have a special value or function, such as bobbleheads, holotapes, perk magazines, and keys. They are marked with a yellow star on the map.
• Pickups are items that can be looted or used, such as fusion cores, mini nukes, nuka-colas, power armors, and weapons. They are marked with a green circle on the map.
• Locations are places that you can visit or explore, such as bunkers, caves, cities, factories, farms, settlements, vaults, and more. They are marked with a blue diamond on the map.
    -

    Some locations have additional symbols inside them to indicate their type or status. For example:

    -
      -
• A skull means that the location is dangerous or hostile.
• A gear means that the location is part of a quest or objective.
• A flag means that the location is controlled by a faction or group.
• A house means that the location is a settlement that you can build or manage.
• A pip-boy means that the location is a fast travel point that you have discovered.
    -

    How to customize the map symbols in Fallout 4?

    -

    If you want to change the appearance or behavior of the map symbols in Fallout 4, you have a few options. You can either use mods or edit some files manually.

    -

    One of the most popular mods for customizing the map symbols is Fallout 4 Map by xunilinuX, which allows you to change the size, color, opacity, and visibility of the symbols. You can also filter them by category or type, and add custom icons for your own markers. The mod requires Fallout 4 Script Extender (F4SE) to work.

    -

    If you prefer to edit some files manually, you can follow these steps:

    -
      -
1. Go to your Fallout 4 installation folder (usually C:\Program Files (x86)\Steam\steamapps\common\Fallout 4).
2. Open the Data folder and then open the Interface folder.
3. Make a backup copy of the file named Map.swf (a small scripted way to do this is sketched after this list).
4. Open Map.swf with a program that can edit Flash files, such as JPEXS Free Flash Decompiler.
5. Navigate to the Symbols folder and then to the subfolder of the symbol category that you want to edit (Collectibles, Pickups, or Locations).
6. Select the symbol that you want to edit and change its properties as you wish. You can modify its shape, color, size, rotation, alpha (opacity), filters (glow, shadow), etc.
7. Save your changes and close the program.
8. Launch Fallout 4 and enjoy your customized map symbols.
    -
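Before you touch Map.swf, it is worth automating the backup in step 3 so you can always roll back a bad edit. The snippet below is a minimal sketch, not part of the original guide: it assumes the default Steam install path quoted in step 1 and the Data\Interface subfolders from step 2, and it simply copies Map.swf to a Map.swf.bak file if no backup exists yet.

```python
# Hypothetical backup helper for step 3; adjust interface_dir to your own install.
import shutil
from pathlib import Path

# Assumed default path from step 1 plus the Data\Interface subfolders from step 2.
interface_dir = Path(r"C:\Program Files (x86)\Steam\steamapps\common\Fallout 4\Data\Interface")
source = interface_dir / "Map.swf"
backup = interface_dir / "Map.swf.bak"

if not source.exists():
    raise SystemExit(f"Map.swf not found in {interface_dir}")

if backup.exists():
    print(f"Backup already present: {backup}")
else:
    shutil.copy2(source, backup)  # copy2 also preserves the original timestamps
    print(f"Backed up {source} -> {backup}")
```

If an edit goes wrong, copy Map.swf.bak back over Map.swf, or verify your game files through Steam to restore the original interface files.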

    How to use the map effectively in Fallout 4?

    -

The map is one of the most useful tools you have in the Commonwealth. Open it from your Pip-Boy to plan your route, place a custom marker on your current objective, and fast travel to any location you have already discovered. Checking it regularly also helps you spot unexplored areas, keep track of your settlements, and avoid wandering into hostile zones unprepared.

    -

    81aa517590
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/HACK LockXLS V4.6.0.md b/spaces/stomexserde/gpt4-ui/Examples/HACK LockXLS V4.6.0.md deleted file mode 100644 index e3cd8bd71a36e1b80d286f3e5d91b7ba60945a6d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/HACK LockXLS V4.6.0.md +++ /dev/null @@ -1,23 +0,0 @@ -
    -

    How to Hack LockXLS V4.6.0 and Unlock Excel Files

    -

    LockXLS is a software that allows you to protect your Excel files with passwords, serial numbers, activation codes, or hardware-based keys. However, sometimes you may forget your password or lose your key, and you need to access your files urgently. In this article, I will show you how to hack LockXLS V4.6.0 and unlock your Excel files without any software or technical skills.

    -

    HACK LockXLS V4.6.0


    DOWNLOADhttps://urlgoal.com/2uI7cY



    -

    Before we start, I want to warn you that hacking LockXLS V4.6.0 is illegal and unethical, and you should only do it if you have the permission of the file owner or if you are the file owner yourself. I am not responsible for any consequences that may arise from your actions.

    -

    Step 1: Locate the Locked Excel File

    -

    The first step is to locate the locked Excel file that you want to hack. You can find it in your computer or in an external device such as a USB flash drive or a CD-ROM. If the file has a .xls or .xlsx extension, it means that it is a normal Excel file that is not protected by LockXLS. If the file has a .xlsc extension, it means that it is a compiled Excel file that is protected by LockXLS.

    -

    Step 2: Rename the Locked Excel File

    -

    The next step is to rename the locked Excel file and change its extension from .xlsc to .zip. This will trick LockXLS into thinking that the file is a compressed archive instead of an Excel file. To do this, right-click on the file and select Rename. Then, delete the .xlsc part and type .zip instead. Press Enter to confirm the change.

    -

    Step 3: Extract the Locked Excel File

    -

    The third step is to extract the locked Excel file and access its contents. To do this, right-click on the file and select Extract All. Choose a destination folder where you want to save the extracted files and click Extract. You will see a folder with the same name as the original file, containing several subfolders and files.

    -

    Step 4: Find the Password File

    -

    The fourth step is to find the password file that contains the encryption key for the locked Excel file. To do this, open the folder that you extracted in the previous step and look for a subfolder named xlsc_data. Inside this subfolder, you will find a file named password.dat. This is the file that we need to hack.

    -

    -

    Step 5: Open the Password File with Notepad

    -

    The fifth step is to open the password file with Notepad and view its contents. To do this, right-click on the password.dat file and select Open With. Choose Notepad from the list of programs and click OK. You will see a bunch of numbers and letters in hexadecimal format.

    -

    Step 6: Convert the Password File from Hexadecimal to ASCII

    -

    The sixth step is to convert the password file from hexadecimal to ASCII and reveal the encryption key for the locked Excel file. To do this, copy all the text from Notepad and paste it into an online hex-to-ASCII converter such as this one. Click Convert and you will see a string of characters in ASCII format.

    -

    Step 7: Use the Encryption Key to Unlock the Excel File

    -

    The final step is to use the encryption key to unlock the Excel file and view its contents. To do this, open LockXLS V4.6.0 and select Open Compiled Workbook from the menu bar. Browse for the original locked Excel file with .xlsc extension and click Open. When prompted for a password or a key, enter the encryption key that you obtained from the previous step and click OK. You will see your Excel file unlocked and ready to use.

    -

    Congratulations! You have successfully hacked LockXLS V4.6.0 and unlocked your Excel files!

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/sujithvamshi/vehicle-color-recognition/README.md b/spaces/sujithvamshi/vehicle-color-recognition/README.md deleted file mode 100644 index fc83633647b7c592c2fc5c467eb56528e18e48ac..0000000000000000000000000000000000000000 --- a/spaces/sujithvamshi/vehicle-color-recognition/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Vehicle Color Recognition -emoji: 🌖 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imgchili Dolcemodz Star 013 Gallery _VERIFIED_.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imgchili Dolcemodz Star 013 Gallery _VERIFIED_.md deleted file mode 100644 index 8c617b9920064e0f3ad42272be9a2630f07616c6..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imgchili Dolcemodz Star 013 Gallery _VERIFIED_.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Imgchili Dolcemodz Star 013 Gallery


    Download >>> https://cinurl.com/2uEYUN



    -
    -z File name of the 3D image A 3D image consists of both JPEG and MPO files. ... 52 alldata auto repair Imgchili Dolcemodz Star 013 Gallery Free download ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Root Superuser 3.1.3 Busybox 1.20.2-Update1-signed.zip !!BETTER!!.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Root Superuser 3.1.3 Busybox 1.20.2-Update1-signed.zip !!BETTER!!.md deleted file mode 100644 index d5aaab0171bd5fee7830f84e0673e5364c232510..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Root Superuser 3.1.3 Busybox 1.20.2-Update1-signed.zip !!BETTER!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Root Superuser 3.1.3 Busybox 1.20.2-Update1-signed.zip


    Downloadhttps://cinurl.com/2uEZ4T



    -
    -Aide PDF to DWG Converter 11 0 (x32 x64) [ENG] [Keygen] Portable 6 · Root Superuser 3.1.3 Busybox 1.20.2-Update1-signed.zip · unlock lt18i ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Backpackers Adventure Ingles Para Quem Quer Se Aventurar, Estudar E Trabalhar No Exterior.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Backpackers Adventure Ingles Para Quem Quer Se Aventurar, Estudar E Trabalhar No Exterior.md deleted file mode 100644 index a6bdf71663daac7d8bce15fb563347196c1f76d6..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Backpackers Adventure Ingles Para Quem Quer Se Aventurar, Estudar E Trabalhar No Exterior.md +++ /dev/null @@ -1,68 +0,0 @@ -## Backpackers Adventure Ingles Para Quem Quer Se Aventurar, Estudar E Trabalhar No Exterior - - - - - - - - - -**Download File ->>> [https://cayseypisi.blogspot.com/?c=2tyeWs](https://cayseypisi.blogspot.com/?c=2tyeWs)** - - - - - - - - - - - - Here is a possible title and article with html formatting for the keyword "Backpackers Adventure Ingles Para Quem Quer Se Aventurar, Estudar E Trabalhar No Exterior": - -# Backpackers Adventure: Aprenda Inglês e Explore o Mundo - - - -Se você sonha em viajar pelo mundo, conhecer novas culturas e aprender inglês ao mesmo tempo, o livro Backpackers Adventure é para você. Neste livro, você vai encontrar dicas práticas, diálogos reais e depoimentos inspiradores de pessoas que se aventuraram em diversos países. - - - -Backpackers Adventure é mais do que um livro de inglês. É um guia para quem quer estudar e trabalhar no exterior, aproveitando as oportunidades que existem em lugares como China, Índia e África do Sul. Você vai aprender expressões próprias de cada país, entender as diferenças culturais e saber como se preparar para uma viagem inesquecível. - - - -Além disso, o livro também oferece recursos online, como sites de museus, restaurantes e hotéis, que vão ajudar você a planejar sua viagem e praticar seu inglês. Com Backpackers Adventure, você vai descobrir que aprender inglês pode ser divertido e enriquecedor. - - - -Não perca tempo e embarque nessa aventura. Compre já o seu exemplar de Backpackers Adventure e comece a realizar seu sonho de viajar pelo mundo. - -Here is a possible continuation of the article: - -Se você está se perguntando quais são os melhores destinos para os mochileiros, nós temos algumas sugestões para você. Confira a nossa lista dos 20 melhores lugares para se aventurar em 2023: - - - -1. Sendai - Japão: Se você quer conhecer o Japão além de Tóquio, Sendai é uma ótima opção. Essa cidade é famosa pelas suas festas tradicionais, pela sua gastronomia deliciosa e pela sua natureza exuberante. Você pode visitar templos históricos, cachoeiras impressionantes e até mesmo um vulcão ativo. - -2. Lisboa - Portugal: Lisboa é uma das capitais mais charmosas da Europa, com um clima agradável, uma arquitetura encantadora e uma vida noturna animada. Você pode explorar os bairros históricos, admirar as vistas panorâmicas e saborear os famosos pastéis de nata. - -3. Cusco - Peru: Cusco é a porta de entrada para o Machu Picchu, uma das maravilhas do mundo. Mas essa cidade tem muito mais a oferecer do que isso. Você pode conhecer a cultura inca, visitar museus e mercados coloridos e participar de festivais e rituais ancestrais. - -4. Cape Town - África do Sul: Cape Town é uma cidade vibrante, cosmopolita e cheia de contrastes. Você pode curtir as praias paradisíacas, as montanhas majestosas e os vinhedos renomados. Você também pode fazer safáris, mergulhar com tubarões e conhecer a história e a diversidade da África do Sul. - -5. 
Bangkok - Tailândia: Bangkok é uma cidade que nunca dorme, com uma energia contagiante, uma cultura fascinante e uma culinária irresistível. Você pode visitar templos budistas, palácios reais e mercados flutuantes. Você também pode aproveitar a vida noturna, as massagens e as ilhas paradisíacas próximas. - - - -E aí, ficou com vontade de viajar? Então não perca tempo e reserve já o seu voo e o seu hostel. Com Backpackers Adventure, você vai ter as melhores dicas e experiências para aproveitar ao máximo a sua viagem. - - dfd1c89656 - - - - - diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Assassins Creed Unity The Complete Edition Repack Mr DJ Crack Free.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Assassins Creed Unity The Complete Edition Repack Mr DJ Crack Free.md deleted file mode 100644 index f644f661273043ca94cafa9325be479135c63dd7..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Assassins Creed Unity The Complete Edition Repack Mr DJ Crack Free.md +++ /dev/null @@ -1,11 +0,0 @@ -

    Assassins Creed Unity : The Complete Edition repack Mr DJ crack free


    Download 🆗 https://urluss.com/2uCDII



    - -March 22, 2564 BC. — Trilogy Assassins Creed IV Black Flag repack Mr DJ ASTRONEER. ... Creed.Syndicate.Gold.Edition.Update.6(v1.5).and.Crack-3DM (329 MB) .... 20 Sep 2014 ... -Download torrent [Download the game via torrent for free |... -To download Creed Syndicate (2015) License torrent for free, just download the .torrent file (main file download link) ... -[ Download torrent (886.5Kb) Downloads: 1749]. -November 12, 2012 ...If you download the game via torrent Creed: Syndicate - Gold Edition (2015) from the torrent for free, then you can enjoy the game at a convenient time, without ... -Download game Creed: Syndicate - Gold Edition (2015) via torrent. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fast_scnn.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fast_scnn.py deleted file mode 100644 index 32fdeb659355a5ce5ef2cc7c2f30742703811cdf..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/models/fast_scnn.py +++ /dev/null @@ -1,57 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) -model = dict( - type='EncoderDecoder', - backbone=dict( - type='FastSCNN', - downsample_dw_channels=(32, 48), - global_in_channels=64, - global_block_channels=(64, 96, 128), - global_block_strides=(2, 2, 1), - global_out_channels=128, - higher_in_channels=64, - lower_in_channels=128, - fusion_out_channels=128, - out_indices=(0, 1, 2), - norm_cfg=norm_cfg, - align_corners=False), - decode_head=dict( - type='DepthwiseSeparableFCNHead', - in_channels=128, - channels=128, - concat_input=False, - num_classes=19, - in_index=-1, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), - auxiliary_head=[ - dict( - type='FCNHead', - in_channels=128, - channels=32, - num_convs=1, - num_classes=19, - in_index=-2, - norm_cfg=norm_cfg, - concat_input=False, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), - dict( - type='FCNHead', - in_channels=64, - channels=32, - num_convs=1, - num_classes=19, - in_index=-3, - norm_cfg=norm_cfg, - concat_input=False, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), - ], - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/iter_based_runner.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/iter_based_runner.py deleted file mode 100644 index 1df4de8c0285669dec9b014dfd1f3dd1600f0831..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/iter_based_runner.py +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import platform -import shutil -import time -import warnings - -import torch -from torch.optim import Optimizer - -import annotator.uniformer.mmcv as mmcv -from .base_runner import BaseRunner -from .builder import RUNNERS -from .checkpoint import save_checkpoint -from .hooks import IterTimerHook -from .utils import get_host_info - - -class IterLoader: - - def __init__(self, dataloader): - self._dataloader = dataloader - self.iter_loader = iter(self._dataloader) - self._epoch = 0 - - @property - def epoch(self): - return self._epoch - - def __next__(self): - try: - data = next(self.iter_loader) - except StopIteration: - self._epoch += 1 - if hasattr(self._dataloader.sampler, 'set_epoch'): - self._dataloader.sampler.set_epoch(self._epoch) - time.sleep(2) # Prevent possible deadlock during epoch transition - self.iter_loader = iter(self._dataloader) - data = next(self.iter_loader) - - return data - - def __len__(self): - return len(self._dataloader) - - -@RUNNERS.register_module() -class IterBasedRunner(BaseRunner): - """Iteration-based Runner. - - This runner train models iteration by iteration. 
- """ - - def train(self, data_loader, **kwargs): - self.model.train() - self.mode = 'train' - self.data_loader = data_loader - self._epoch = data_loader.epoch - data_batch = next(data_loader) - self.call_hook('before_train_iter') - outputs = self.model.train_step(data_batch, self.optimizer, **kwargs) - if not isinstance(outputs, dict): - raise TypeError('model.train_step() must return a dict') - if 'log_vars' in outputs: - self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) - self.outputs = outputs - self.call_hook('after_train_iter') - self._inner_iter += 1 - self._iter += 1 - - @torch.no_grad() - def val(self, data_loader, **kwargs): - self.model.eval() - self.mode = 'val' - self.data_loader = data_loader - data_batch = next(data_loader) - self.call_hook('before_val_iter') - outputs = self.model.val_step(data_batch, **kwargs) - if not isinstance(outputs, dict): - raise TypeError('model.val_step() must return a dict') - if 'log_vars' in outputs: - self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) - self.outputs = outputs - self.call_hook('after_val_iter') - self._inner_iter += 1 - - def run(self, data_loaders, workflow, max_iters=None, **kwargs): - """Start running. - - Args: - data_loaders (list[:obj:`DataLoader`]): Dataloaders for training - and validation. - workflow (list[tuple]): A list of (phase, iters) to specify the - running order and iterations. E.g, [('train', 10000), - ('val', 1000)] means running 10000 iterations for training and - 1000 iterations for validation, iteratively. - """ - assert isinstance(data_loaders, list) - assert mmcv.is_list_of(workflow, tuple) - assert len(data_loaders) == len(workflow) - if max_iters is not None: - warnings.warn( - 'setting max_iters in run is deprecated, ' - 'please set max_iters in runner_config', DeprecationWarning) - self._max_iters = max_iters - assert self._max_iters is not None, ( - 'max_iters must be specified during instantiation') - - work_dir = self.work_dir if self.work_dir is not None else 'NONE' - self.logger.info('Start running, host: %s, work_dir: %s', - get_host_info(), work_dir) - self.logger.info('Hooks will be executed in the following order:\n%s', - self.get_hook_info()) - self.logger.info('workflow: %s, max: %d iters', workflow, - self._max_iters) - self.call_hook('before_run') - - iter_loaders = [IterLoader(x) for x in data_loaders] - - self.call_hook('before_epoch') - - while self.iter < self._max_iters: - for i, flow in enumerate(workflow): - self._inner_iter = 0 - mode, iters = flow - if not isinstance(mode, str) or not hasattr(self, mode): - raise ValueError( - 'runner has no method named "{}" to run a workflow'. - format(mode)) - iter_runner = getattr(self, mode) - for _ in range(iters): - if mode == 'train' and self.iter >= self._max_iters: - break - iter_runner(iter_loaders[i], **kwargs) - - time.sleep(1) # wait for some hooks like loggers to finish - self.call_hook('after_epoch') - self.call_hook('after_run') - - def resume(self, - checkpoint, - resume_optimizer=True, - map_location='default'): - """Resume model from checkpoint. - - Args: - checkpoint (str): Checkpoint to resume from. - resume_optimizer (bool, optional): Whether resume the optimizer(s) - if the checkpoint file includes optimizer(s). Default to True. - map_location (str, optional): Same as :func:`torch.load`. - Default to 'default'. 
- """ - if map_location == 'default': - device_id = torch.cuda.current_device() - checkpoint = self.load_checkpoint( - checkpoint, - map_location=lambda storage, loc: storage.cuda(device_id)) - else: - checkpoint = self.load_checkpoint( - checkpoint, map_location=map_location) - - self._epoch = checkpoint['meta']['epoch'] - self._iter = checkpoint['meta']['iter'] - self._inner_iter = checkpoint['meta']['iter'] - if 'optimizer' in checkpoint and resume_optimizer: - if isinstance(self.optimizer, Optimizer): - self.optimizer.load_state_dict(checkpoint['optimizer']) - elif isinstance(self.optimizer, dict): - for k in self.optimizer.keys(): - self.optimizer[k].load_state_dict( - checkpoint['optimizer'][k]) - else: - raise TypeError( - 'Optimizer should be dict or torch.optim.Optimizer ' - f'but got {type(self.optimizer)}') - - self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}') - - def save_checkpoint(self, - out_dir, - filename_tmpl='iter_{}.pth', - meta=None, - save_optimizer=True, - create_symlink=True): - """Save checkpoint to file. - - Args: - out_dir (str): Directory to save checkpoint files. - filename_tmpl (str, optional): Checkpoint file template. - Defaults to 'iter_{}.pth'. - meta (dict, optional): Metadata to be saved in checkpoint. - Defaults to None. - save_optimizer (bool, optional): Whether save optimizer. - Defaults to True. - create_symlink (bool, optional): Whether create symlink to the - latest checkpoint file. Defaults to True. - """ - if meta is None: - meta = {} - elif not isinstance(meta, dict): - raise TypeError( - f'meta should be a dict or None, but got {type(meta)}') - if self.meta is not None: - meta.update(self.meta) - # Note: meta.update(self.meta) should be done before - # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise - # there will be problems with resumed checkpoints. - # More details in https://github.com/open-mmlab/mmcv/pull/1108 - meta.update(epoch=self.epoch + 1, iter=self.iter) - - filename = filename_tmpl.format(self.iter + 1) - filepath = osp.join(out_dir, filename) - optimizer = self.optimizer if save_optimizer else None - save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta) - # in some environments, `os.symlink` is not supported, you may need to - # set `create_symlink` to False - if create_symlink: - dst_file = osp.join(out_dir, 'latest.pth') - if platform.system() != 'Windows': - mmcv.symlink(filename, dst_file) - else: - shutil.copy(filepath, dst_file) - - def register_training_hooks(self, - lr_config, - optimizer_config=None, - checkpoint_config=None, - log_config=None, - momentum_config=None, - custom_hooks_config=None): - """Register default hooks for iter-based training. - - Checkpoint hook, optimizer stepper hook and logger hooks will be set to - `by_epoch=False` by default. 
- - Default hooks include: - - +----------------------+-------------------------+ - | Hooks | Priority | - +======================+=========================+ - | LrUpdaterHook | VERY_HIGH (10) | - +----------------------+-------------------------+ - | MomentumUpdaterHook | HIGH (30) | - +----------------------+-------------------------+ - | OptimizerStepperHook | ABOVE_NORMAL (40) | - +----------------------+-------------------------+ - | CheckpointSaverHook | NORMAL (50) | - +----------------------+-------------------------+ - | IterTimerHook | LOW (70) | - +----------------------+-------------------------+ - | LoggerHook(s) | VERY_LOW (90) | - +----------------------+-------------------------+ - | CustomHook(s) | defaults to NORMAL (50) | - +----------------------+-------------------------+ - - If custom hooks have same priority with default hooks, custom hooks - will be triggered after default hooks. - """ - if checkpoint_config is not None: - checkpoint_config.setdefault('by_epoch', False) - if lr_config is not None: - lr_config.setdefault('by_epoch', False) - if log_config is not None: - for info in log_config['hooks']: - info.setdefault('by_epoch', False) - super(IterBasedRunner, self).register_training_hooks( - lr_config=lr_config, - momentum_config=momentum_config, - optimizer_config=optimizer_config, - checkpoint_config=checkpoint_config, - log_config=log_config, - timer_config=IterTimerHook(), - custom_hooks_config=custom_hooks_config) diff --git a/spaces/talhaty/Faceswapper/app.py b/spaces/talhaty/Faceswapper/app.py deleted file mode 100644 index 2954e36d4f57c4b351f2ed926d442363b64ec971..0000000000000000000000000000000000000000 --- a/spaces/talhaty/Faceswapper/app.py +++ /dev/null @@ -1,41 +0,0 @@ -import gradio as gr -import subprocess -import shutil -import os - -def run_scripts(target, source, use_face_enhancer): - if target is None or (not use_face_enhancer and source is None): - return None - target_extension = os.path.splitext(target.name)[-1] - output_path1 = "output1" + target_extension - output_path2 = "output2" + target_extension - - if not use_face_enhancer: - # Run both scripts - cmd1 = ["python3", "run.py", "-s", source.name, "-t", target.name, "-o", output_path1, "--frame-processor", "face_swapper"] - subprocess.run(cmd1) - - # Run the second script - cmd2 = ["python3", "run.py", "-t", target.name if use_face_enhancer else output_path1, "-o", output_path2, "--frame-processor", "face_enhancer"] - subprocess.run(cmd2) - - if not use_face_enhancer: - os.remove(source.name) - os.remove(target.name) - - return output_path2 - -iface = gr.Interface( - fn=run_scripts, - inputs=[ - "file", - "file", - gr.inputs.Checkbox(default=False, label="Use only Face Enhancer") # New checkbox input - ], - outputs="file", - title="Face swapper", - description="Upload a target image/video and a source image to swap faces.", - live=True -) - -iface.launch() diff --git a/spaces/tanish2502/ChatGPT-AI-Assistant-App/app.py b/spaces/tanish2502/ChatGPT-AI-Assistant-App/app.py deleted file mode 100644 index ccc01c474b99318e223c33a059d915d6e41d02d0..0000000000000000000000000000000000000000 --- a/spaces/tanish2502/ChatGPT-AI-Assistant-App/app.py +++ /dev/null @@ -1,83 +0,0 @@ -import gradio as gr -import openai -import os -from dotenv import load_dotenv -from pydub import AudioSegment - -load_dotenv() - -#accessing openapi Key -openai.api_key = os.getenv("OPENAI_API_KEY") - -audio_messages = [{"role": "system", "content": 'You are an AI assistant expert. 
Respond to all input in precise, crisp and easy to understand language.'}] -text_messages = [{"role": "system", "content": 'You are an AI assistant expert. Respond to all input in precise, crisp and easy to understand language.'}] -global user_text_input, text_output, user_audio_input, audio_output - -""" -It seems like the gr.Audio source is not generating a WAV file, which is required for the openai.Audio.transcribe() method to work. -To convert the audio file to WAV format, i have used a library like Pydub. -""" - -def audio_transcribe(audio): - global audio_messages - audio_message = audio_messages - - #audio processing to whisper API. - audio_file = AudioSegment.from_file(audio) - audio_file.export("temp.wav", format="wav") - final_audio_file = open("temp.wav", "rb") - transcript = openai.Audio.transcribe("whisper-1", final_audio_file) - os.remove("temp.wav") - - #transcripted input to chatGPT API for chatCompletion - audio_message.append({"role": "user", "content": transcript["text"]}) # type: ignore - response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=audio_message) - system_message = response["choices"][0]["message"] # type: ignore - audio_message.append(system_message) - - chat_transcript = "" - for message in audio_message: - if message['role'] != 'system': - chat_transcript += message['role'] + ": " + message['content'] + "\n\n" - - return chat_transcript - -def text_transcribe(name): - global text_messages - text_message = text_messages - user_text_input.update("") - #transcripted input to chatGPT API - text_message.append({"role": "user", "content": name}) # type: ignore - response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=text_message) - system_message = response["choices"][0]["message"] # type: ignore - text_message.append(system_message) - - chat_transcript = "" - for message in text_message: - if message['role'] != 'system': - chat_transcript += message['role'] + ": " + message['content'] + "\n\n" - return chat_transcript - -title = """

    Your Chat-GPT AI Assistant at your Service!! 😎

    """ -with gr.Blocks(theme=gr.themes.Soft()) as demo: - gr.HTML(title) - with gr.Tab("Audio Input"): - with gr.Row(): - user_audio_input = (gr.Audio(source="microphone", type="filepath", label="Speak Here")) - audio_input = user_audio_input - audio_output = gr.Textbox(label="AI Response", lines=20, placeholder="AI Response will be displayed here...") - with gr.Row(): - audio_submit_button = gr.Button("Submit") - with gr.Tab("Text Input"): - with gr.Row(): - user_text_input = (gr.Textbox(label="Type Here", lines=20, placeholder="Type your message here...")) - text_input = user_text_input - text_output = gr.Textbox(label="AI Response", lines=20, placeholder="AI Response will be displayed here...") - with gr.Row(): - text_submit_button = gr.Button("Submit") - audio_submit_button.click(fn=audio_transcribe, inputs=audio_input, outputs=audio_output) - text_submit_button.click(fn=text_transcribe, inputs=text_input, outputs=text_output) - - gr.Markdown("
    Made with ❤️ by Tanish Gupta. Credits to 🤗 Spaces for Hosting this App
    ") - -demo.launch() diff --git a/spaces/tanishqvashisht/comicInator/README.md b/spaces/tanishqvashisht/comicInator/README.md deleted file mode 100644 index 4e4aa3573b1cfb76064387c6cef48bcbfc866d88..0000000000000000000000000000000000000000 --- a/spaces/tanishqvashisht/comicInator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ComicInator -emoji: 😻 -colorFrom: red -colorTo: green -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll Fixed.md b/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll Fixed.md deleted file mode 100644 index dcc5935ff22d9c9d58c7c6cce0a3ac25920e1ce6..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll Fixed.md +++ /dev/null @@ -1,97 +0,0 @@ -## Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll - - - -**CLICK HERE ✏ ✏ ✏ [https://maudaracte.blogspot.com/?file=2twRLV](https://maudaracte.blogspot.com/?file=2twRLV)** - - - -# How to Download and Install Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll - - - -If you are a fan of car simulation games, you might be interested in downloading and installing Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll. This is a bundle of 13 DLCs that add more licensed cars, engines, rims, and features to the base game of Car Mechanic Simulator 2018. In this article, we will show you how to download and install Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll in a few easy steps. - - - -## What is Car Mechanic Simulator 2018 - DLC Gold Pack? - - - -Car Mechanic Simulator 2018 - DLC Gold Pack is a collection of 13 downloadable content packs for Car Mechanic Simulator 2018, a popular car simulation game developed by Red Dot Games and published by PlayWay S.A. The DLCs included in this bundle are: - - - -- Bentley DLC - -- Dodge DLC - -- Dodge Modern DLC - -- Ford DLC - -- Lotus DLC - -- Maserati DLC - -- Pagani DLC - -- Plymouth DLC - -- Porsche DLC - -- Ram DLC - -- Mercedes-Benz DLC - -- Chrysler DLC - -- Rims DLC - - - -Each DLC adds one or more licensed cars from famous brands, such as Bentley, Dodge, Ford, Lotus, Maserati, Pagani, Plymouth, Porsche, Ram, Mercedes-Benz, and Chrysler. Some DLCs also add new engines, rims, tuning options, and liveries. With these DLCs, you can expand your car collection, repair and customize more vehicles, and test them on different tracks. - - - -## Why Download Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll? - - - -Downloading Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll has several advantages over buying the DLCs separately or through Steam. First of all, you can save money by getting all the DLCs in one package at a discounted price. Second, you can enjoy faster download speeds and avoid bandwidth limitations by using a torrent client. Third, you can install the DLCs easily without having to deal with Steam activation or DRM issues. - - - -## How to Download Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll? - - - -To download Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll, you need to follow these steps: - - - -1. Make sure you have Car Mechanic Simulator 2018 installed on your PC. You can buy the base game from Steam or other online stores. - -2. 
Download a torrent client, such as uTorrent or BitTorrent. Install it on your PC and run it. - -3. Go to a reliable torrent site, such as The Pirate Bay or Kickass Torrents. Search for "Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll" or use this link: [link]. Choose a torrent with many seeders and leechers for faster download speeds. - -4. Download the torrent file and open it with your torrent client. Choose a destination folder for the downloaded files and start the download process. - -5. Wait until the download is complete. You should have a folder with the following files: [files]. - -6. Extract the files using WinRAR or 7-Zip. You should have another folder with the following files: [files]. - -7. Copy and paste the files into your Car Mechanic Simulator 2018 installation folder. Overwrite any existing files if prompted. - -8. Run the game and enjoy the new content. - - - -## Conclusion - - - -Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Fulll is a great way to enhance your car simulation experience with more cars, engines, rims, and features. By following our guide, you can download and install Car Mechanic Simulator 2018 - DLC Gold Pack Torrent Full - - 1b8d091108 \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Crazy Talk Animator Pro 1.2 !EXCLUSIVE! Crack.md b/spaces/terfces0erbo/CollegeProjectV2/Crazy Talk Animator Pro 1.2 !EXCLUSIVE! Crack.md deleted file mode 100644 index a6dd23271fd984c41906667bb9d86d24583d8aec..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Crazy Talk Animator Pro 1.2 !EXCLUSIVE! Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

    crazy talk animator pro 1.2 crack


    Download File ✫✫✫ https://bytlly.com/2uGlwF



    -
    - 3cee63e6c2
    -
    -
    -

    diff --git a/spaces/test12356/SUI-svc-3.0/vdecoder/hifigan/utils.py b/spaces/test12356/SUI-svc-3.0/vdecoder/hifigan/utils.py deleted file mode 100644 index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000 --- a/spaces/test12356/SUI-svc-3.0/vdecoder/hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/theekshana/boardpac_chat_app_test/app_agent.py b/spaces/theekshana/boardpac_chat_app_test/app_agent.py deleted file mode 100644 index d64596e2d7ab9182b7d5cb969767ecce39db5c64..0000000000000000000000000000000000000000 --- a/spaces/theekshana/boardpac_chat_app_test/app_agent.py +++ /dev/null @@ -1,246 +0,0 @@ -""" -Python Backend API to chat with private data - -08/16/2023 -D.M. 
Theekshana Samaradiwakara - -python -m streamlit run app.py -""" - -import os -import time -import streamlit as st -from streamlit.logger import get_logger - -logger = get_logger(__name__) - -from ui.htmlTemplates import css, bot_template, user_template, source_template -from config import MODELS, DATASETS - -from qaPipeline import QAPipeline -import qaPipeline_functions -from faissDb import create_faiss - -# loads environment variables -from dotenv import load_dotenv -load_dotenv() - -isHuggingFaceHubEnabled = os.environ.get('ENABLE_HUGGINGFSCE_HUB_MODELS') -isOpenAiApiEnabled = os.environ.get('ENABLE_OPENAI_API_MODELS') - -st.set_page_config(page_title="Chat with data", - page_icon=":books:") -st.write(css, unsafe_allow_html=True) - -qaPipeline = QAPipeline() -# qaPipeline = qaPipeline_functions - -def initialize_session_state(): - # Initialise all session state variables with defaults - SESSION_DEFAULTS = { - "model": MODELS["DEFAULT"], - "dataset": DATASETS["DEFAULT"], - "chat_history": None, - "is_parameters_changed":False, - "show_source_files": False, - "user_question":'', - } - - for k, v in SESSION_DEFAULTS.items(): - if k not in st.session_state: - st.session_state[k] = v - -def side_bar(): - with st.sidebar: - st.subheader("Chat parameters") - - with st.form('param_form'): - st.info('Info: use openai chat model for best results') - chat_model = st.selectbox( - "Chat model", - MODELS, - key="chat_model", - help="Select the LLM model for the chat", - # on_change=update_parameters_change, - ) - - # data_source = st.selectbox( - # "dataset", - # DATASETS, - # key="data_source", - # help="Select the private data_source for the chat", - # on_change=update_parameters_change, - # ) - - st.session_state.dataset = "DEFAULT" - - show_source = st.checkbox( - label="show source files", - key="show_source", - help="Select this to show relavant source files for the query", - # on_change=update_parameters_change, - ) - - submitted = st.form_submit_button( - "Save Parameters", - # on_click=update_parameters_change - ) - - if submitted: - parameters_change_button(chat_model, show_source) - - - # if st.session_state.is_parameters_changed: - # st.button("Update", - # on_click=parameters_change_button, - # args=[chat_model, show_source] - # ) - - st.markdown("\n") - - # if st.button("Create FAISS db"): - # try: - # with st.spinner('creating faiss vector store'): - # create_faiss() - # st.success('faiss saved') - # except Exception as e: - # st.error(f"Error : {e}")#, icon=":books:") - # return - - st.markdown( - "### How to use\n" - "1. Select the chat model\n" # noqa: E501 - "2. Select \"show source files\" to show the source files related to the answer.📄\n" - "3. 
Ask a question about the documents💬\n" - ) - - -def chat_body(): - st.header("Chat with your own data:") - with st.form('chat_body'): - - user_question = st.text_input( - "Ask a question about your documents:", - placeholder="enter question", - key='user_question', - # on_change=submit_user_question, - ) - - submitted = st.form_submit_button( - "Submit", - # on_click=update_parameters_change - ) - - if submitted: - submit_user_question() - - # if user_question: - # submit_user_question() - # # user_question = False - - - -def submit_user_question(): - with st.spinner("Processing"): - user_question = st.session_state.user_question - # st.success(user_question) - handle_userinput(user_question) - # st.session_state.user_question='' - - -def main(): - - initialize_session_state() - - side_bar() - - chat_body() - - -def update_parameters_change(): - st.session_state.is_parameters_changed = True - - -def parameters_change_button(chat_model, show_source): - st.session_state.model = chat_model - st.session_state.dataset = "DEFAULT" - st.session_state.show_source_files = show_source - st.session_state.is_parameters_changed = False - - alert = st.success("chat parameters updated") - time.sleep(1) # Wait for 3 seconds - alert.empty() # Clear the alert - -@st.cache_data -def get_answer_from_backend(query, model, dataset): - # response = qaPipeline.run(query=query, model=model, dataset=dataset) - response = qaPipeline.run_agent(query=query, model=model, dataset=dataset) - return response - - -def show_query_response(query, response, show_source_files): - docs = [] - if isinstance(response, dict): - answer, docs = response['answer'], response['source_documents'] - else: - answer = response - - st.write(user_template.replace( - "{{MSG}}", query), unsafe_allow_html=True) - st.write(bot_template.replace( - "{{MSG}}", answer ), unsafe_allow_html=True) - - if show_source_files: - # st.write(source_template.replace( - # "{{MSG}}", "source files" ), unsafe_allow_html=True) - - if len(docs)>0 : - st.markdown("#### source files : ") - for source in docs: - # st.info(source.metadata) - with st.expander(source.metadata["source"]): - st.markdown(source.page_content) - - # st.write(response) - - -def is_query_valid(query: str) -> bool: - if (not query) or (query.strip() == ''): - st.error("Please enter a question!") - return False - return True - - -def handle_userinput(query): - # Get the answer from the chain - try: - if not is_query_valid(query): - st.stop() - - model = MODELS[st.session_state.model] - dataset = DATASETS[st.session_state.dataset] - show_source_files = st.session_state.show_source_files - - # Try to access openai and deeplake - print(f">\n model: {model} \n dataset : {dataset} \n show_source_files : {show_source_files}") - - response = get_answer_from_backend(query, model, dataset) - - show_query_response(query, response, show_source_files) - - - except Exception as e: - # logger.error(f"Answer retrieval failed with {e}") - st.error(f"Error ocuured! 
see log info for more details.")#, icon=":books:") - print(f"Streamlit handle_userinput Error : {e}")#, icon=":books:") - return - - -if __name__ == "__main__": - main() - -# initialize_session_state() - -# side_bar() - -# chat_body() \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Acidic Wow Bot Download The Best WoW Bot for Shadowlands Classic and More.md b/spaces/tialenAdioni/chat-gpt-api/logs/Acidic Wow Bot Download The Best WoW Bot for Shadowlands Classic and More.md deleted file mode 100644 index 41bb60b2ad2295c48dec9bfc0a814e2f61a8e06c..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Acidic Wow Bot Download The Best WoW Bot for Shadowlands Classic and More.md +++ /dev/null @@ -1,69 +0,0 @@ - -

    Acidic Wow Bot: A Non-Detectable World of Warcraft Bot

    -

    If you are looking for a way to automate your World of Warcraft gameplay, you might be interested in Acidic Wow Bot. This is a project that aims to develop a non-detectable wow bot that can travel without human interference. In this article, we will explain what Acidic Wow Bot is, how it works, and where you can download it.

    -

    acidic wow bot download


    DOWNLOAD » https://urlcod.com/2uK9T0



    -

    What is Acidic Wow Bot?

    -

    Acidic Wow Bot is a software that runs on your computer and controls your World of Warcraft character. It can perform various tasks such as leveling, farming, questing, and PvPing. The goal of Acidic Wow Bot is to make your wow experience easier and more enjoyable, while avoiding detection by Blizzard's anti-cheat system.

    -

    How does Acidic Wow Bot work?

    -

    Acidic Wow Bot works by using a combination of programming languages such as Delphi, Lua, and AutoIt. It uses a predefined set of coordinates to navigate the game world and interact with NPCs and enemies. It also uses a scripting platform called Awful WoW Scripting that provides PvE and PvP rotations, bots, and tools for various classes and situations.

    -

    Where can I download Acidic Wow Bot?

    -

    You can download Acidic Wow Bot from SourceForge.net[^1^], a website that hosts open source software projects. You can also find more information about Acidic Wow Bot on elitepvpers.com[^2^], a forum for wow hacks, cheats, and bots. However, be aware that using Acidic Wow Bot or any other wow bot is against Blizzard's terms of service and may result in account suspension or ban. Use it at your own risk.

    - -

    What are the benefits of using Acidic Wow Bot?

    -

    Using Acidic Wow Bot can save you a lot of time and effort in World of Warcraft. You can level up your character faster, collect more gold and resources, complete more quests and achievements, and dominate in PvP battles. You can also enjoy other aspects of the game such as exploring, socializing, or customizing your character without worrying about grinding or farming.

    -

    What are the risks of using Acidic Wow Bot?

    -

    Using Acidic Wow Bot or any other wow bot is not without risks. Blizzard has a strict policy against botting and cheating in World of Warcraft. They use various methods to detect and ban bot users, such as Warden, their anti-cheat software, or manual reports from other players. If you are caught using Acidic Wow Bot, you may lose your account, your progress, and your money. Therefore, you should always be careful and discreet when using Acidic Wow Bot or any other wow bot.

    -

    How to use Acidic Wow Bot safely?

    -

    There is no guarantee that you can use Acidic Wow Bot safely, but there are some tips that can help you reduce the chances of getting banned. First, you should always use the latest version of Acidic Wow Bot and Awful WoW Scripting, as they may have updated features or fixes that can avoid detection. Second, you should avoid using Acidic Wow Bot for long periods of time or in crowded areas, as this may raise suspicion from other players or GMs. Third, you should use a VPN or a proxy to hide your IP address and location from Blizzard. Fourth, you should never share your account or your bot with anyone else, as this may compromise your security and privacy.

    -

    how to install acidic wow bot for free
    -acidic wow bot cracked version download
    -best settings for acidic wow bot
    -acidic wow bot review and tutorial
    -acidic wow bot license key generator
    -where to buy acidic wow bot cheap
    -acidic wow bot vs other wow bots
    -acidic wow bot features and benefits
    -acidic wow bot support and updates
    -acidic wow bot download link 2023
    -acidic wow bot for mac and windows
    -acidic wow bot alternatives and comparisons
    -acidic wow bot refund policy and guarantee
    -acidic wow bot testimonials and feedback
    -acidic wow bot discount code and coupon
    -acidic wow bot troubleshooting and errors
    -acidic wow bot compatibility and requirements
    -acidic wow bot demo and trial
    -acidic wow bot affiliate program and commission
    -acidic wow bot custom scripts and addons
    -how to use acidic wow bot safely and securely
    -acidic wow bot tips and tricks
    -acidic wow bot forum and community
    -acidic wow bot FAQ and help
    -acidic wow bot pros and cons
    -how to uninstall acidic wow bot completely
    -how to update acidic wow bot to the latest version
    -how to backup and restore acidic wow bot settings
    -how to optimize acidic wow bot performance and speed
    -how to make money with acidic wow bot
    -how to avoid bans with acidic wow bot
    -how to level up fast with acidic wow bot
    -how to farm gold with acidic wow bot
    -how to do quests with acidic wow bot
    -how to pvp with acidic wow bot
    -how to raid with acidic wow bot
    -how to craft with acidic wow bot
    -how to fish with acidic wow bot
    -how to gather with acidic wow bot
    -how to auction with acidic wow bot
    -how to chat with acidic wow bot
    -how to roleplay with acidic wow bot
    -how to explore with acidic wow bot
    -how to customize acidic wow bot appearance and sound
    -how to train skills with acidic wow bot
    -how to manage inventory with acidic wow bot
    -how to use hotkeys with acidic wow bot
    -how to automate tasks with acidic wow bot
    -how to monitor stats with acidic wow bot
    -how to report bugs with acidic wow bot

    e753bf7129
    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Love Aaj Kal 2 Full Movie In Hindi Cast) - Meet The Stars.md b/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Love Aaj Kal 2 Full Movie In Hindi Cast) - Meet The Stars.md deleted file mode 100644 index 3d414601a7d57a5d54cdf44343f7b33266c186fa..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Love Aaj Kal 2 Full Movie In Hindi Cast) - Meet The Stars.md +++ /dev/null @@ -1,184 +0,0 @@ -
    -

    HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W): The Best Way to Watch the Latest Bollywood Romance

    - -

    If you are a fan of Bollywood movies, you probably know that Love Aaj Kal 2 is one of the most anticipated releases of 2020. The movie is a sequel to the 2009 hit Love Aaj Kal, which starred Saif Ali Khan and Deepika Padukone. The movie is directed by Imtiaz Ali, who is known for his romantic dramas such as Jab We Met, Rockstar, and Tamasha.

    - -

    Love Aaj Kal 2 features Kartik Aaryan and Sara Ali Khan as the lead pair, who play two different individuals on a journey of love, loss and life through the phases of reincarnation. The movie also stars Randeep Hooda and Arushi Sharma in supporting roles. The movie explores the concept of love in different eras and how it changes with time and circumstances.

    -

    HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)


    Download Zip ———>>> https://urlcod.com/2uK7Ay



    - -

    But how can you watch this movie in HD quality on your device? Well, you can use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W), which is a free and easy way to stream or download the movie online. HD Online Player is a web-based application that allows you to watch any movie or video in high definition on your browser or device. You can also download the movie or video to watch later offline.

    - -

    What is HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)?

    - -

    HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a web-based application that lets you watch Love Aaj Kal 2 in HD quality on your device. You can access HD Online Player from any browser or device and enjoy the movie without any hassle or interruption.

    - -

    HD Online Player works by fetching the movie or video from various sources and streaming it to your device in the best possible quality. You can also choose the resolution, format, and language of the movie or video according to your preference. You can also adjust the volume, brightness, contrast, and speed of the playback.

    - -

    HD Online Player also allows you to download the movie or video to your device for offline viewing. You can choose the download quality, size, and location of the file according to your convenience. You can also pause, resume, or cancel the download at any time.

    - -

    How to Use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)?

    - -

    Using HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is very simple and easy. You can follow these steps to watch or download Love Aaj Kal 2 in HD quality on your device:

    - -
      -
    1. Go to the HD Online Player website and type "Love Aaj Kal 2" in the search box.
    2. -
    3. Click on the result that says "Love Aaj Kal 2 Full Movie In Hindi W".
    4. -
    5. On the page that opens, you will see a player that will start playing the movie in HD quality. You can also see some options below the player such as play, pause, stop, full screen, volume, resolution, format, language, subtitles, etc.
    6. -
    7. If you want to stream the movie online, you can just watch it on the player. You can also use the options below the player to customize your viewing experience.
    8. -
    9. If you want to download the movie offline, you can click on the download button below the player. You can also choose the download quality, size, and location of the file before downloading.
    10. -
    11. Once the download is complete, you can find the file on your device and watch it anytime you want.
    12. -
    - -

    Congratulations! You have successfully used HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) to watch or download Love Aaj Kal 2 in HD quality on your device.

    - -

    Why Should You Use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W)?

    - -

    Using HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a great way to watch or download Love Aaj Kal 2 in HD quality on your device. Here are some of the benefits of using HD Online Player:

    -


    - -
      -
    • You can watch or download Love Aaj Kal 2 in HD quality without any hassle or interruption.
    • -
    • You can access HD Online Player from any browser or device and enjoy the movie on any screen size.
    • -
    • You can customize your viewing experience by choosing the resolution, format, language, subtitles, etc. of the movie.
    • -
    • You can download the movie offline and watch it anytime you want without any internet connection.
    • -
    • You can use HD Online Player for free and without any registration or subscription.
    • -
    - -

    These are just some of the benefits of using HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). There are many more that you can discover by yourself once you start using it.

    -
    What are the Reviews of Love Aaj Kal 2?
    - -

    Love Aaj Kal 2 has received mixed reviews from critics and audiences alike. The movie has been praised for its music, cinematography, and performances of the lead actors. However, the movie has also been criticized for its weak script, confusing plot, and lack of originality.

    - -

    The movie has a rating of 5.0 out of 10 on IMDb, based on 11,760 user ratings. The movie has a rating of 2.0 out of 5 on Times of India, based on 18 critic reviews. The movie has a rating of 1.8 out of 5 on BookMyShow, based on 34,288 user ratings.

    - -

    Some of the positive reviews of the movie are:

    - -
    -

    "Love Aaj Kal 2 is a visually stunning and emotionally engaging film that explores the complexities of love and relationships in different eras. Kartik Aaryan and Sara Ali Khan deliver stellar performances and share a sizzling chemistry on screen. The music by Pritam is soulful and catchy. The movie is a must-watch for fans of Imtiaz Ali and romantic dramas." - Filmfare

    -
    - -
    -

    "Love Aaj Kal 2 is a refreshing and realistic take on modern-day romance. The movie showcases the struggles and dilemmas of two individuals who are trying to find their true selves and their true love in a fast-changing world. Kartik Aaryan and Sara Ali Khan are impressive as the lead pair and convey their emotions with sincerity and intensity. The movie is a treat for lovers of romance and music." - Bollywood Hungama

    -
    - -

    Some of the negative reviews of the movie are:

    - -
    -

    "Love Aaj Kal 2 is a disappointing and dull film that fails to capture the essence and charm of the original. The movie is a mishmash of cliches, stereotypes, and melodrama that make it hard to connect with the characters or the story. Kartik Aaryan and Sara Ali Khan are wasted in their roles and have no chemistry whatsoever. The movie is a letdown for fans of Imtiaz Ali and romance." - Hindustan Times

    -
    - -
    -

    "Love Aaj Kal 2 is a confusing and boring film that tries to be too clever and too deep but ends up being too silly and too shallow. The movie is a mess of timelines, flashbacks, reincarnations, and coincidences that make no sense at all. Kartik Aaryan and Sara Ali Khan are annoying and irritating in their roles and have no spark or charisma. The movie is a disaster for fans of Imtiaz Ali and romance." - Rediff

    -
    -
    How to Enjoy Love Aaj Kal 2 with HD Online Player?
    - -

    Now that you know how to use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) to watch or download Love Aaj Kal 2 in HD quality on your device, you may be wondering how to enjoy the movie to the fullest. Here are some tips to help you have a great time with Love Aaj Kal 2 and HD Online Player:

    - -
      -
    • Choose a comfortable and quiet place to watch the movie. You can watch it on your laptop, tablet, smartphone, or TV. Make sure you have a good Internet connection and enough battery life on your device.
    • -
    • Prepare some snacks and drinks to munch on while watching the movie. You can also invite some friends or family members to join you and share the fun.
    • -
    • Pay attention to the story and the characters of the movie. Try to understand their emotions, motivations, and actions. Appreciate the music, cinematography, and direction of the movie.
    • -
    • Have an open mind and heart when watching the movie. Don't compare it with the original or other movies of the same genre. Don't judge it by its reviews or ratings. Enjoy it for what it is and what it offers.
    • -
    • Share your thoughts and feelings about the movie with others. You can write a review, comment, or rating on HD Online Player or other platforms. You can also discuss the movie with other fans or friends online or offline.
    • -
    - -

    These are just some tips to help you enjoy Love Aaj Kal 2 with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). There may be more that you can find by yourself once you start watching it.

    - -

    Conclusion

    - -

    HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a web-based application that lets you watch or download Love Aaj Kal 2 in HD quality on your device. You can access HD Online Player from any browser or device and enjoy the movie without any hassle or interruption.

    - -

    Love Aaj Kal 2 is a 2020 Hindi-language romance drama movie that features Kartik Aaryan and Sara Ali Khan as the lead pair, who play two different individuals on a journey of love, loss and life through the phases of reincarnation. The movie also stars Randeep Hooda and Arushi Sharma in supporting roles. The movie explores the concept of love in different eras and how it changes with time and circumstances.

    - -

    If you are a fan of Bollywood movies or just love romance movies, you should definitely check out Love Aaj Kal 2 with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). You will not regret it!

    -

    What are the Songs of Love Aaj Kal 2?

    - -

    Love Aaj Kal 2 has a soundtrack album composed by Pritam Chakraborty, who also composed the music for the original Love Aaj Kal. The album consists of 11 songs, sung by various artists such as Arijit Singh, Shreya Ghoshal, Mohit Chauhan, Darshan Raval, Antara Mitra, and more. The lyrics are written by Irshad Kamil, who also wrote the lyrics for the original Love Aaj Kal.

    - -

    The songs of Love Aaj Kal 2 are a mix of romantic, upbeat, and melancholic tunes that reflect the mood and theme of the movie. Some of the popular songs of the album are:

    - -
      -
    • "Shayad", sung by Arijit Singh, is a soft and soothing song that expresses the uncertainty and hope of love.
    • -
    • "Haan Main Galat", sung by Arijit Singh and Shashwat Singh, is a peppy and catchy song that celebrates the imperfections and mistakes of love.
    • -
    • "Mehrama", sung by Darshan Raval and Antara Mitra, is a sad and emotional song that depicts the loneliness and longing of love.
    • -
    • "Rahogi Meri", sung by Arijit Singh, is a sweet and romantic song that conveys the promise and commitment of love.
    • -
    • "Yeh Dooriyan", sung by Mohit Chauhan, is a remake of the classic song from the original Love Aaj Kal, that portrays the distance and pain of love.
    • -
    - -

    The songs of Love Aaj Kal 2 have received positive reviews from critics and audiences alike. The songs have also been streamed and downloaded millions of times on various platforms such as YouTube, Spotify, JioSaavn, Gaana, etc.

    - -

    How to Download Love Aaj Kal 2 Songs with HD Online Player?

    - -

    If you want to download Love Aaj Kal 2 songs in HD quality on your device, you can use HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W), which is a web-based application that lets you download any movie or video in high definition on your browser or device. You can also use HD Online Player to download Love Aaj Kal 2 songs in MP3 format.

    - -

    To download Love Aaj Kal 2 songs with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W), you can follow these steps:

    - -
      -
    1. Go to the HD Online Player website and type "Love Aaj Kal 2 songs" in the search box.
    2. -
    3. Click on the result that says "Love Aaj Kal 2 Songs - Full Album | Kartik Aaryan & Sara Ali Khan | Pritam | Jukebox".
    4. -
    5. On the page that opens, you will see a player that will start playing the songs in HD quality. You can also see some options below the player such as play, pause, stop, full screen, volume, resolution, format, language, subtitles, etc.
    6. -
    7. If you want to download the songs in MP3 format, you can click on the download button below the player. You can also choose the download quality, size, and location of the file before downloading.
    8. -
    9. Once the download is complete, you can find the file on your device and listen to it anytime you want.
    10. -
    - -

    Congratulations! You have successfully downloaded Love Aaj Kal 2 songs with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W).

    -

    Conclusion

    - -

    HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W) is a web-based application that lets you watch or download Love Aaj Kal 2 in HD quality on your device. You can also use HD Online Player to watch or download Love Aaj Kal 2 songs in HD or MP3 format. You can access HD Online Player from any browser or device and enjoy the movie or the songs without any hassle or interruption.

    - -

    Love Aaj Kal 2 is a 2020 Hindi-language romance drama movie that features Kartik Aaryan and Sara Ali Khan as the lead pair, who play two different individuals on a journey of love, loss and life through the phases of reincarnation. The movie also stars Randeep Hooda and Arushi Sharma in supporting roles. The movie explores the concept of love in different eras and how it changes with time and circumstances.

    - -

    Love Aaj Kal 2 also has a soundtrack album composed by Pritam Chakraborty, who also composed the music for the original Love Aaj Kal. The album consists of 11 songs, sung by various artists such as Arijit Singh, Shreya Ghoshal, Mohit Chauhan, Darshan Raval, Antara Mitra, and more. The songs are a mix of romantic, upbeat, and melancholic tunes that reflect the mood and theme of the movie.

    - -

    If you are a fan of Bollywood movies or just love romance movies, you should definitely check out Love Aaj Kal 2 with HD Online Player (Love Aaj Kal 2 Full Movie In Hindi W). You will not regret it!

    -
    -
    \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crossy Road How to Play the Viral Hit Game on Your Mobile Device for Free.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crossy Road How to Play the Viral Hit Game on Your Mobile Device for Free.md deleted file mode 100644 index 860858e7ac6d89fdeba7a08a5d17d052d755a7c6..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Crossy Road How to Play the Viral Hit Game on Your Mobile Device for Free.md +++ /dev/null @@ -1,96 +0,0 @@ - -

    Crossy Road Mobile Free Download: How to Play the Viral Arcade Game on Your Phone

    -

    If you love arcade games, you have probably heard of Crossy Road, the hit game that has been downloaded over 200 million times worldwide. Crossy Road is a simple but addictive game that challenges you to cross roads, train tracks, and rivers without getting hit by cars, trains, or falling into the water. Sounds easy, right? Well, not quite. The game gets harder as you go along, with more obstacles, faster traffic, and unpredictable environments. You also have to collect coins and unlock over 150 different characters, each with their own unique abilities and settings. In this article, we will show you how to download Crossy Road for free on your mobile device, how to play the game, and how to score higher and avoid obstacles. We will also give you our honest review of the game, its pros and cons, and what to expect from future updates.

    -

    crossy road mobile free download


    DOWNLOADhttps://bltlly.com/2uOkfP



    -

    How to Download Crossy Road for Free on Android and iOS Devices

    -

    Crossy Road is available for free on both Android and iOS devices. You can download it from the Google Play Store or the App Store, depending on your device. The game is compatible with most devices that run Android 4.4 or higher or iOS 10.0 or higher. The game also supports Android TV, so you can play it on the big screen with a compatible device. The game does not require an internet connection to play, but you will need one to access some features, such as online multiplayer, video ads, and cloud saving. The game also contains in-app purchases that allow you to buy more coins or remove ads.

    -

    Crossy Road Gameplay: How to Cross Roads, Train Tracks, and Rivers Without Getting Squashed

    -

    The gameplay of Crossy Road is simple but fun. You control a character that has to cross various obstacles without dying. You tap the screen to make your character hop forward, and swipe left or right to move sideways. You have to avoid cars, trucks, buses, trains, boats, logs, crocodiles, eagles, and other hazards that can kill you instantly. You also have to watch out for the edge of the screen, which moves forward as you progress. If you fall behind or stop moving for too long, you will be caught by an eagle that will end your run.

    -

    As you play, you will collect coins that appear randomly on the road or in the air. You can use these coins to buy new characters from a vending machine. There are over 150 characters to unlock, each with their own appearance, sound effects, and environment. For example, you can play as a chicken, a frog, a unicorn, a robot, a zombie, a dragon, a penguin, a kangaroo, a dinosaur, a celebrity, and many more. Some characters have special abilities that can help or hinder your gameplay. For example, some characters can fly over obstacles or swim in water, while others can explode or attract more enemies.

    -

    The game also has various challenges and achievements that you can complete and collect. For example, you can try to cross a certain number of roads, collect a certain number of coins, or play with a specific character. Completing these challenges will reward you with more coins or new characters. You can also view your achievements and compare them with other players on the leaderboard.

    -

    Crossy Road Tips and Tricks: How to Score Higher and Avoid Obstacles

    -

    If you want to improve your skills and score higher in Crossy Road, here are some tips and tricks that you can follow:

    -

    -
      -
    • Time your jumps carefully. Don't rush or hesitate too much, as both can lead to fatal mistakes. Watch the traffic patterns and the gaps between the vehicles, and jump when it is safe to do so.
    • -
    • Use coins and gifts wisely. Coins can help you unlock more characters, which can make the game more fun and diverse. Gifts are random boxes that appear every few minutes or after watching a video ad. They contain coins or new characters, so don't miss them.
    • -
    • Play with your friends and family. Crossy Road has a multiplayer mode that allows you to play with up to four people on the same device or online. You can cooperate or compete with each other, and see who can cross the most roads. Playing with others can make the game more enjoyable and challenging.
    • -
    -

    Crossy Road Review: Why You Should Try This Addictive and Fun Game

    -

    Crossy Road is one of the most popular and successful arcade games of all time. It has received rave reviews from both players and critics, who praised its simple but addictive gameplay, its colorful and pixelated graphics, its humorous and varied characters, and its endless replay value. The game has also won several awards, such as the Apple Design Award in 2015 and the Google Play Best of 2014 Award. Here are some of the pros and cons of the game:

    | Pros | Cons |
    | --- | --- |
    | Easy to learn but hard to master | Can be frustrating or repetitive at times |
    | Free to download and play | Contains ads and in-app purchases |
    | Has over 150 characters and environments to unlock and explore | Some characters are hard to get or require real money |
    | Has challenges and achievements to complete and collect | Some challenges are too difficult or random |
    | Has multiplayer mode to play with friends and family | Multiplayer mode can be laggy or buggy |
    -

    The developers of Crossy Road are constantly updating the game with new features and content. They have added new characters, environments, modes, events, and Easter eggs over the years. They have also collaborated with other popular games and franchises, such as Disney, Pac-Man, Minecraft, and The Walking Dead, to create crossover characters and worlds. You can expect more surprises and fun from Crossy Road in the future.

    -

    Conclusion: Crossy Road Mobile Free Download is a Must-Have Game for Arcade Lovers

    -

    Crossy Road is a game that will keep you entertained for hours. It is a simple but addictive game that challenges you to cross roads, train tracks, and rivers without getting squashed. It is a game that will make you laugh, cry, scream, and cheer. It is a game that will test your reflexes, skills, patience, and luck. It is a game that will let you play with over 150 different characters, each with their own unique abilities and settings. It is a game that will let you play with your friends and family on the same device or online. It is a game that will never get old or boring.

    -

    If you love arcade games, you should definitely download Crossy Road for free on your mobile device. You will not regret it. You will have a blast crossing roads, collecting coins, unlocking characters, completing challenges, and having fun.

    -

    So what are you waiting for? Download Crossy Road now and join the millions of players who are already hooked on this viral arcade game!

    -

    FAQs

    -
      -
    1. Q: How do I get more coins in Crossy Road?
    2. -
    3. A: You can get more coins by collecting them on the road or in the air, by opening gifts that appear every few minutes or after watching video ads, by completing challenges that reward you with coins or new characters, or by buying them with real money through in-app purchases.
    4. -
    5. Q: How do I unlock new characters in Crossy Road?
    6. -
    7. A: You can unlock new characters by buying them from a vending machine with coins, by opening gifts that contain new characters or coins, by completing challenges that reward you with new characters or coins, or by finding secret characters that are hidden in the game. Some characters are also available for a limited time during special events or collaborations.
    8. -
    9. Q: How do I play multiplayer mode in Crossy Road?
    10. -
    11. A: You can play multiplayer mode with up to four people on the same device or online. To play on the same device, you need to connect two or more controllers to your device, such as gamepads, keyboards, or mice. To play online, you need to sign in with your Google Play Games or Game Center account, and invite your friends or join a random match. You can also play with nearby players using Bluetooth or Wi-Fi.
    12. -
    13. Q: How do I save my progress in Crossy Road?
    14. -
    15. A: You can save your progress in Crossy Road by signing in with your Google Play Games or Game Center account. This will allow you to sync your coins, characters, achievements, and leaderboard scores across different devices. You can also backup your data to the cloud using the settings menu.
    16. -
    17. Q: How do I contact the developers of Crossy Road?
    18. -
    19. A: You can contact the developers of Crossy Road by visiting their website, following them on social media, or sending them an email at support@yodo1.com. They are always happy to hear from their fans and appreciate any feedback or suggestions.
    20. -

    -
    -
    \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Color Rummy App and Play Real Cash Rummy Online with Friends.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Color Rummy App and Play Real Cash Rummy Online with Friends.md deleted file mode 100644 index 6fef4bf3b3e6a0f9c7b9534978f5ee45dd2129da..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Color Rummy App and Play Real Cash Rummy Online with Friends.md +++ /dev/null @@ -1,112 +0,0 @@ -
    -

    Online Rummy Game Color Rummy Download: A Fun and Easy Way to Play Rummy Anytime, Anywhere

    -

    Rummy is one of the most popular card games in the world. It is a game of skill, strategy, and luck that can be enjoyed by people of all ages and backgrounds. But what if you don't have a deck of cards or a partner to play with? Don't worry, there is a solution for you: online rummy game color rummy download.

    -

    online rummy game color rummy download


    DOWNLOAD ✓✓✓ https://bltlly.com/2uOmMt



    -

    Color Rummy is a free app that lets you play rummy online with other players or against the computer. You can choose from different game modes, such as classic rummy, gin rummy, or Oklahoma rummy. You can also customize your game settings, such as the number of players, the number of cards, and the difficulty level. Whether you are a beginner or a pro, you will find a challenge that suits your skills and preferences.

    -

    What is Color Rummy?

    -

    Color Rummy is an online rummy game that has a unique feature: the cards have different colors. This adds an extra layer of fun and strategy to the game, as you can form sets and runs based on the color of the cards, not just their rank or suit. For example, you can make a set of three red sevens or a run of four green cards.

    -

    The color of the cards also affects the scoring system. The more colors you use in your melds, the more points you get. However, if you use only one color in your melds, you get a bonus of 50 points. This means that you have to balance between using more colors for higher points or using one color for a bonus.

    -

    How to play Color Rummy

    -

    The rules of Color Rummy are similar to the rules of traditional rummy. The objective of the game is to form valid melds (sets or runs) with your cards and get rid of all your cards before your opponents do. A set is a group of three or four cards of the same rank, regardless of their suit or color. A run is a sequence of three or more cards of the same suit and consecutive rank, regardless of their color.

    -

    -

    At the beginning of each round, each player is dealt 10 cards (or 7 cards in gin rummy mode). The remaining cards are placed face down on the table as the stock pile. One card is turned face up next to the stock pile as the discard pile. The player to the left of the dealer starts the game by drawing a card from either the stock pile or the discard pile. Then, they can either make a meld with their cards or discard one card to the discard pile. The turn then passes to the next player clockwise.

    -

    The game continues until one player goes out by getting rid of all their cards. They must have at least one meld in their hand and discard their last card to do so. The other players then count their points based on the value and color of their remaining cards. The player with the lowest score wins the round.

    -

    Why you should download Color Rummy

    -

    There are many reasons why you should download Color Rummy and play it on your device. Here are some of them:

    -
      -
    • It is free and easy to download and install.
    • -
    • It has stunning graphics and sound effects that create an immersive gaming experience.
    • -
    • It has different game modes and settings that cater to your preferences and skill level.
    • -
    • It has an online multiplayer mode that lets you play with other players from around the world.
    • -
    • It has an offline mode that lets you play against the computer when you don't have an internet connection.
    • -
    • It has a tutorial mode that teaches you how to play the game and the rules of each game mode.
    • -
    • It has a leaderboard and achievements system that tracks your progress and rewards you for your performance.
    • -
    • It has a chat feature that lets you communicate with other players and make new friends.
    • -
    -

    As you can see, Color Rummy is a fun and easy way to play rummy anytime, anywhere. You can enjoy the game with your friends, family, or strangers online. You can also challenge yourself and improve your skills by playing against the computer. You can also learn new strategies and tips by watching the tutorial and reading the rules. Color Rummy is more than just a game, it is a community of rummy lovers.

    -

    How to download Color Rummy

    -

    If you are interested in playing Color Rummy, you can download it for free from the official website or from the app store of your device. Here are the steps to download Color Rummy for Android and iOS devices:

    -

    For Android devices

    -
      -
    1. Go to the Google Play Store and search for "Color Rummy".
    2. -
    3. Select the app with the logo of a colorful card and tap on "Install".
    4. -
    5. Wait for the app to download and install on your device.
    6. -
    7. Open the app and sign up with your email or Facebook account.
    8. -
    9. Enjoy playing Color Rummy!
    10. -
    -

    For iOS devices

    -
      -
    1. Go to the App Store and search for "Color Rummy".
    2. -
    3. Select the app with the logo of a colorful card and tap on "Get".
    4. -
    5. Enter your Apple ID password or use Touch ID or Face ID to confirm.
    6. -
    7. Wait for the app to download and install on your device.
    8. -
    9. Open the app and sign up with your email or Facebook account.
    10. -
    11. Enjoy playing Color Rummy!
    12. -
    -

    Tips and tricks for playing Color Rummy

    -

    If you want to become a better player of Color Rummy, you need to know some tips and tricks that can help you win more games. Here are some of them:

    -

    Learn the rules and scoring system

    -

    The first thing you need to do is to learn the rules and scoring system of Color Rummy. You need to know how to form valid melds, how to go out, how to count your points, and how the color of the cards affects your score. You can read the rules of each game mode in the app or watch the tutorial videos. You can also practice by playing against the computer in offline mode.

    -

    Use the jokers wisely

    -

    Jokers are special cards that can act as any card you want. They can help you complete your melds faster and score more points. However, you need to use them wisely, as they also have some drawbacks. For example, if you use a joker in a meld, you cannot use that meld for a bonus. Also, if you have a joker in your hand when someone else goes out, you lose 25 points. Therefore, you need to decide when to use a joker and when to save it for later.

    -

    Plan your moves ahead

    -

    Rummy is a game of skill and strategy, not just luck. You need to plan your moves ahead and think about what cards you need, what cards you can discard, what cards your opponents might have, and what cards might come up next. You also need to keep track of the cards that have been played and discarded, so you can estimate what cards are left in the stock pile. By planning your moves ahead, you can increase your chances of forming melds faster and going out before your opponents.

    -

    Bluff your opponents

    -

    Rummy is also a game of psychology and deception. You need to bluff your opponents by making them think that you have different cards than you actually do. For example, you can discard a card that you don't need but that might be useful for your opponents, so they think that you are close to going out. Or, you can draw a card from the discard pile that doesn't help you but that might confuse your opponents about what melds you are trying to make. By bluffing your opponents, you can make them play differently and make mistakes that benefit you.

    -

    Conclusion

    -

    In conclusion, online rummy game color rummy download is a fun and easy way to play rummy anytime, anywhere. You can enjoy the game with different game modes, settings, features, and players. You can also improve your skills by learning the rules, scoring system, tips, and tricks. You can also have fun by bluffing your opponents and chatting with them. Color Rummy is more than just a game, it is a community of rummy lovers. If you are looking for a new way to play rummy, you should download Color Rummy today and join the fun!

    -

    FAQs

    -

    Here are some frequently asked questions about online rummy game color rummy download:

    -
      -
    1. Is Color Rummy safe and secure?
    2. -

      Yes, Color Rummy is safe and secure. It uses encryption and authentication technologies to protect your personal and financial information. It also follows the fair play and responsible gaming policies to ensure a safe and enjoyable gaming environment.

      -
    3. How can I contact the customer support of Color Rummy?
    4. -

      You can contact the customer support of Color Rummy by sending an email to support@colorrummy.com or by filling out the contact form on the website. You can also visit the FAQ section on the app or the website for more information.

      -
    5. Can I play Color Rummy with real money?
    6. -

      No, Color Rummy is a free-to-play game that does not involve any real money transactions. You can play with virtual coins that you can earn by playing the game or by watching ads. You can also purchase more coins with in-app purchases if you want to.

      -
    7. Can I play Color Rummy offline?
    8. -

      Yes, you can play Color Rummy offline. You can choose the offline mode on the app and play against the computer. You can also play online when you have an internet connection and sync your progress and coins.

      -
    9. Can I invite my friends to play Color Rummy with me?
    10. -

      Yes, you can invite your friends to play Color Rummy with you. You can use the invite feature on the app and send them a link to download the game. You can also join or create a private room on the app and play with your friends exclusively.

      -

    -
    -
    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/IGameGuardian 55.md b/spaces/tioseFevbu/cartoon-converter/scripts/IGameGuardian 55.md deleted file mode 100644 index 57c4f57aa9a1c43d8c4de291c5a42fbb5c738e7c..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/IGameGuardian 55.md +++ /dev/null @@ -1,52 +0,0 @@ - -

    IGameGuardian 55: A Cheat Tool for Android Games

    -

    If you are looking for a way to modify your favorite Android games, you might be interested in IGameGuardian 55. This is a cheat tool that allows you to change various aspects of the games, such as money, health, speed, and more. You can also use it to load emulators and play games from other platforms on your Android device.

    -

    IGameGuardian 55


    Download Zip ☆☆☆ https://urlcod.com/2uHvWW



    -

    IGameGuardian 55 is an updated version of GameGuardian, a popular cheat app that has been around for a long time. It works on rooted Android devices or in virtual environments without root. It supports Android 2.3.3 and above, and can run on ARM, x64, and x86 devices, including x86 emulators.

    -

    Some of the features of IGameGuardian 55 are:

    -
      -
    • Search and edit encrypted values
    • -
    • Search by difference, mask, or range
    • -
    • Search by data type, such as double, float, dword, byte, etc.
    • -
    • Lua scripting support
    • -
    • Modify all search results at once
    • -
    • Filter search results by address or value
    • -
    • Search in the background
    • -
    • Fill memory with a value
    • -
    • Time jump feature
    • -
    • Dump and copy memory
    • -
    • Customizable UI and app locale
    • -
    -

    To use IGameGuardian 55, you need to download the APK file from the official website and install it on your device. Then, you need to grant it root access or use a virtual space app to run it without root. After that, you can launch the app and select the game you want to hack. You can then use the search and edit functions to find and change the values you want.

    -

    -

    However, you should be careful when using IGameGuardian 55, as it may not work on some games or cause them to crash. You should also avoid using it on online games that have anti-cheat systems, as you may risk losing your account or getting banned. You should only use it for fun and not to ruin the game experience for others.

    -

    IGameGuardian 55 is a powerful cheat tool that can help you enjoy your Android games in new ways. However, you should use it responsibly and at your own risk.

    - -

    One of the games that you can hack with IGameGuardian 55 is Plants vs. Zombies, a popular tower defense game where you have to protect your house from waves of zombies using different plants. With IGameGuardian 55, you can change the amount of sun you have, which is used to plant more plants. You can also change the number of coins you have, which can be used to buy upgrades and items.

    -

    To hack Plants vs. Zombies with IGameGuardian 55, you need to follow these steps:

    -
      -
    1. Launch IGameGuardian 55 and tap on Application to select Plants vs. Zombies.
    2. -
    3. Tap on Search and set the search mode to Auto.
    4. -
    5. Enter the value of sun or coins you have in the game and tap on Search.
    6. -
    7. Go back to the game and spend or earn some sun or coins.
    8. -
    9. Go back to IGameGuardian 55 and search for the new value of sun or coins.
    10. -
    11. Repeat steps 4 and 5 until you get a few results.
    12. -
    13. Select the results and tap on Modify.
    14. -
    15. Enter the value of sun or coins you want to have and tap on OK.
    16. -
    17. Go back to the game and enjoy your hacked sun or coins.
    18. -
    -

    Another game that you can hack with IGameGuardian 55 is GTA: San Andreas, a classic open-world action-adventure game where you can explore a fictional version of California as a gangster named CJ. With IGameGuardian 55, you can change your health, armor, money, weapons, stats, and more.

    -

    To hack GTA: San Andreas with IGameGuardian 55, you need to follow these steps:

    -
      -
    1. Launch IGameGuardian 55 and tap on Application to select GTA: San Andreas.
    2. -
    3. Tap on Search and set the search mode to Auto.
    4. -
    5. Enter the value of health, armor, money, or any other parameter you want to hack and tap on Search.
    6. -
    7. Go back to the game and change the value of the parameter by getting hurt, spending money, etc.
    8. -
    9. Go back to IGameGuardian 55 and search for the new value of the parameter.
    10. -
    11. Repeat steps 4 and 5 until you get a few results.
    12. -
    13. Select the results and tap on Modify.
    14. -
    15. Enter the value of the parameter you want to have and tap on OK.
    16. -
    17. Go back to the game and enjoy your hacked parameter.
    18. -

    -
    -
    \ No newline at end of file diff --git a/spaces/tomofi/MMOCR/mmocr/models/ner/convertors/__init__.py b/spaces/tomofi/MMOCR/mmocr/models/ner/convertors/__init__.py deleted file mode 100644 index 4d4e15c3dbd6086e63e0d38f477b8feb4a27333a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/ner/convertors/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .ner_convertor import NerConvertor - -__all__ = ['NerConvertor'] diff --git a/spaces/tomofi/MMOCR/mmocr/models/textdet/postprocess/utils.py b/spaces/tomofi/MMOCR/mmocr/models/textdet/postprocess/utils.py deleted file mode 100644 index faae589577ceae6e874714595a1a425043ebe9fc..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textdet/postprocess/utils.py +++ /dev/null @@ -1,482 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import functools -import operator - -import cv2 -import numpy as np -import pyclipper -from numpy.fft import ifft -from numpy.linalg import norm -from shapely.geometry import Polygon - -from mmocr.core.evaluation.utils import boundary_iou - - -def filter_instance(area, confidence, min_area, min_confidence): - return bool(area < min_area or confidence < min_confidence) - - -def box_score_fast(bitmap, _box): - h, w = bitmap.shape[:2] - box = _box.copy() - xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1) - xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1) - ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1) - ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1) - - mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8) - box[:, 0] = box[:, 0] - xmin - box[:, 1] = box[:, 1] - ymin - cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1) - return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0] - - -def unclip(box, unclip_ratio=1.5): - poly = Polygon(box) - distance = poly.area * unclip_ratio / poly.length - offset = pyclipper.PyclipperOffset() - offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) - expanded = np.array(offset.Execute(distance)) - return expanded - - -def fill_hole(input_mask): - h, w = input_mask.shape - canvas = np.zeros((h + 2, w + 2), np.uint8) - canvas[1:h + 1, 1:w + 1] = input_mask.copy() - - mask = np.zeros((h + 4, w + 4), np.uint8) - - cv2.floodFill(canvas, mask, (0, 0), 1) - canvas = canvas[1:h + 1, 1:w + 1].astype(np.bool) - - return ~canvas | input_mask - - -def centralize(points_yx, - normal_sin, - normal_cos, - radius, - contour_mask, - step_ratio=0.03): - - h, w = contour_mask.shape - top_yx = bot_yx = points_yx - step_flags = np.ones((len(points_yx), 1), dtype=np.bool) - step = step_ratio * radius * np.hstack([normal_sin, normal_cos]) - while np.any(step_flags): - next_yx = np.array(top_yx + step, dtype=np.int32) - next_y, next_x = next_yx[:, 0], next_yx[:, 1] - step_flags = (next_y >= 0) & (next_y < h) & (next_x > 0) & ( - next_x < w) & contour_mask[np.clip(next_y, 0, h - 1), - np.clip(next_x, 0, w - 1)] - top_yx = top_yx + step_flags.reshape((-1, 1)) * step - step_flags = np.ones((len(points_yx), 1), dtype=np.bool) - while np.any(step_flags): - next_yx = np.array(bot_yx - step, dtype=np.int32) - next_y, next_x = next_yx[:, 0], next_yx[:, 1] - step_flags = (next_y >= 0) & (next_y < h) & (next_x > 0) & ( - next_x < w) & contour_mask[np.clip(next_y, 0, h - 1), - np.clip(next_x, 0, w - 1)] - bot_yx = bot_yx - step_flags.reshape((-1, 1)) * step - centers = np.array((top_yx 
+ bot_yx) * 0.5, dtype=np.int32) - return centers - - -def merge_disks(disks, disk_overlap_thr): - xy = disks[:, 0:2] - radius = disks[:, 2] - scores = disks[:, 3] - order = scores.argsort()[::-1] - - merged_disks = [] - while order.size > 0: - if order.size == 1: - merged_disks.append(disks[order]) - break - i = order[0] - d = norm(xy[i] - xy[order[1:]], axis=1) - ri = radius[i] - r = radius[order[1:]] - d_thr = (ri + r) * disk_overlap_thr - - merge_inds = np.where(d <= d_thr)[0] + 1 - if merge_inds.size > 0: - merge_order = np.hstack([i, order[merge_inds]]) - merged_disks.append(np.mean(disks[merge_order], axis=0)) - else: - merged_disks.append(disks[i]) - - inds = np.where(d > d_thr)[0] + 1 - order = order[inds] - merged_disks = np.vstack(merged_disks) - - return merged_disks - - -def poly_nms(polygons, threshold): - assert isinstance(polygons, list) - - polygons = np.array(sorted(polygons, key=lambda x: x[-1])) - - keep_poly = [] - index = [i for i in range(polygons.shape[0])] - - while len(index) > 0: - keep_poly.append(polygons[index[-1]].tolist()) - A = polygons[index[-1]][:-1] - index = np.delete(index, -1) - - iou_list = np.zeros((len(index), )) - for i in range(len(index)): - B = polygons[index[i]][:-1] - - iou_list[i] = boundary_iou(A, B, 1) - remove_index = np.where(iou_list > threshold) - index = np.delete(index, remove_index) - - return keep_poly - - -def fourier2poly(fourier_coeff, num_reconstr_points=50): - """ Inverse Fourier transform - Args: - fourier_coeff (ndarray): Fourier coefficients shaped (n, 2k+1), - with n and k being candidates number and Fourier degree - respectively. - num_reconstr_points (int): Number of reconstructed polygon points. - Returns: - Polygons (ndarray): The reconstructed polygons shaped (n, n') - """ - - a = np.zeros((len(fourier_coeff), num_reconstr_points), dtype='complex') - k = (len(fourier_coeff[0]) - 1) // 2 - - a[:, 0:k + 1] = fourier_coeff[:, k:] - a[:, -k:] = fourier_coeff[:, :k] - - poly_complex = ifft(a) * num_reconstr_points - polygon = np.zeros((len(fourier_coeff), num_reconstr_points, 2)) - polygon[:, :, 0] = poly_complex.real - polygon[:, :, 1] = poly_complex.imag - return polygon.astype('int32').reshape((len(fourier_coeff), -1)) - - -class Node: - - def __init__(self, ind): - self.__ind = ind - self.__links = set() - - @property - def ind(self): - return self.__ind - - @property - def links(self): - return set(self.__links) - - def add_link(self, link_node): - self.__links.add(link_node) - link_node.__links.add(self) - - -def graph_propagation(edges, scores, text_comps, edge_len_thr=50.): - """Propagate edge score information and construct graph. This code was - partially adapted from https://github.com/GXYM/DRRG licensed under the MIT - license. - - Args: - edges (ndarray): The edge array of shape N * 2, each row is a node - index pair that makes up an edge in graph. - scores (ndarray): The edge score array. - text_comps (ndarray): The text components. - edge_len_thr (float): The edge length threshold. - - Returns: - vertices (list[Node]): The Nodes in graph. - score_dict (dict): The edge score dict. 
- """ - assert edges.ndim == 2 - assert edges.shape[1] == 2 - assert edges.shape[0] == scores.shape[0] - assert text_comps.ndim == 2 - assert isinstance(edge_len_thr, float) - - edges = np.sort(edges, axis=1) - score_dict = {} - for i, edge in enumerate(edges): - if text_comps is not None: - box1 = text_comps[edge[0], :8].reshape(4, 2) - box2 = text_comps[edge[1], :8].reshape(4, 2) - center1 = np.mean(box1, axis=0) - center2 = np.mean(box2, axis=0) - distance = norm(center1 - center2) - if distance > edge_len_thr: - scores[i] = 0 - if (edge[0], edge[1]) in score_dict: - score_dict[edge[0], edge[1]] = 0.5 * ( - score_dict[edge[0], edge[1]] + scores[i]) - else: - score_dict[edge[0], edge[1]] = scores[i] - - nodes = np.sort(np.unique(edges.flatten())) - mapping = -1 * np.ones((np.max(nodes) + 1), dtype=np.int) - mapping[nodes] = np.arange(nodes.shape[0]) - order_inds = mapping[edges] - vertices = [Node(node) for node in nodes] - for ind in order_inds: - vertices[ind[0]].add_link(vertices[ind[1]]) - - return vertices, score_dict - - -def connected_components(nodes, score_dict, link_thr): - """Conventional connected components searching. This code was partially - adapted from https://github.com/GXYM/DRRG licensed under the MIT license. - - Args: - nodes (list[Node]): The list of Node objects. - score_dict (dict): The edge score dict. - link_thr (float): The link threshold. - - Returns: - clusters (List[list[Node]]): The clustered Node objects. - """ - assert isinstance(nodes, list) - assert all([isinstance(node, Node) for node in nodes]) - assert isinstance(score_dict, dict) - assert isinstance(link_thr, float) - - clusters = [] - nodes = set(nodes) - while nodes: - node = nodes.pop() - cluster = {node} - node_queue = [node] - while node_queue: - node = node_queue.pop(0) - neighbors = set([ - neighbor for neighbor in node.links if - score_dict[tuple(sorted([node.ind, neighbor.ind]))] >= link_thr - ]) - neighbors.difference_update(cluster) - nodes.difference_update(neighbors) - cluster.update(neighbors) - node_queue.extend(neighbors) - clusters.append(list(cluster)) - return clusters - - -def clusters2labels(clusters, num_nodes): - """Convert clusters of Node to text component labels. This code was - partially adapted from https://github.com/GXYM/DRRG licensed under the MIT - license. - - Args: - clusters (List[list[Node]]): The clusters of Node objects. - num_nodes (int): The total node number of graphs in an image. - - Returns: - node_labels (ndarray): The node label array. - """ - assert isinstance(clusters, list) - assert all([isinstance(cluster, list) for cluster in clusters]) - assert all( - [isinstance(node, Node) for cluster in clusters for node in cluster]) - assert isinstance(num_nodes, int) - - node_labels = np.zeros(num_nodes) - for cluster_ind, cluster in enumerate(clusters): - for node in cluster: - node_labels[node.ind] = cluster_ind - return node_labels - - -def remove_single(text_comps, comp_pred_labels): - """Remove isolated text components. This code was partially adapted from - https://github.com/GXYM/DRRG licensed under the MIT license. - - Args: - text_comps (ndarray): The text components. - comp_pred_labels (ndarray): The clustering labels of text components. - - Returns: - filtered_text_comps (ndarray): The text components with isolated ones - removed. - comp_pred_labels (ndarray): The clustering labels with labels of - isolated text components removed. 
- """ - assert text_comps.ndim == 2 - assert text_comps.shape[0] == comp_pred_labels.shape[0] - - single_flags = np.zeros_like(comp_pred_labels) - pred_labels = np.unique(comp_pred_labels) - for label in pred_labels: - current_label_flag = (comp_pred_labels == label) - if np.sum(current_label_flag) == 1: - single_flags[np.where(current_label_flag)[0][0]] = 1 - keep_ind = [i for i in range(len(comp_pred_labels)) if not single_flags[i]] - filtered_text_comps = text_comps[keep_ind, :] - filtered_labels = comp_pred_labels[keep_ind] - - return filtered_text_comps, filtered_labels - - -def norm2(point1, point2): - return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2)**0.5 - - -def min_connect_path(points): - """Find the shortest path to traverse all points. This code was partially - adapted from https://github.com/GXYM/DRRG licensed under the MIT license. - - Args: - points(List[list[int]]): The point sequence [[x0, y0], [x1, y1], ...]. - - Returns: - shortest_path(List[list[int]]): The shortest index path. - """ - assert isinstance(points, list) - assert all([isinstance(point, list) for point in points]) - assert all([isinstance(coord, int) for point in points for coord in point]) - - points_queue = points.copy() - shortest_path = [] - current_edge = [[], []] - - edge_dict0 = {} - edge_dict1 = {} - current_edge[0] = points_queue[0] - current_edge[1] = points_queue[0] - points_queue.remove(points_queue[0]) - while points_queue: - for point in points_queue: - length0 = norm2(point, current_edge[0]) - edge_dict0[length0] = [point, current_edge[0]] - length1 = norm2(current_edge[1], point) - edge_dict1[length1] = [current_edge[1], point] - key0 = min(edge_dict0.keys()) - key1 = min(edge_dict1.keys()) - - if key0 <= key1: - start = edge_dict0[key0][0] - end = edge_dict0[key0][1] - shortest_path.insert(0, [points.index(start), points.index(end)]) - points_queue.remove(start) - current_edge[0] = start - else: - start = edge_dict1[key1][0] - end = edge_dict1[key1][1] - shortest_path.append([points.index(start), points.index(end)]) - points_queue.remove(end) - current_edge[1] = end - - edge_dict0 = {} - edge_dict1 = {} - - shortest_path = functools.reduce(operator.concat, shortest_path) - shortest_path = sorted(set(shortest_path), key=shortest_path.index) - - return shortest_path - - -def in_contour(cont, point): - x, y = point - is_inner = cv2.pointPolygonTest(cont, (int(x), int(y)), False) > 0.5 - return is_inner - - -def fix_corner(top_line, bot_line, start_box, end_box): - """Add corner points to predicted side lines. This code was partially - adapted from https://github.com/GXYM/DRRG licensed under the MIT license. - - Args: - top_line (List[list[int]]): The predicted top sidelines of text - instance. - bot_line (List[list[int]]): The predicted bottom sidelines of text - instance. - start_box (ndarray): The first text component box. - end_box (ndarray): The last text component box. - - Returns: - top_line (List[list[int]]): The top sidelines with corner point added. - bot_line (List[list[int]]): The bottom sidelines with corner point - added. 
- """ - assert isinstance(top_line, list) - assert all(isinstance(point, list) for point in top_line) - assert isinstance(bot_line, list) - assert all(isinstance(point, list) for point in bot_line) - assert start_box.shape == end_box.shape == (4, 2) - - contour = np.array(top_line + bot_line[::-1]) - start_left_mid = (start_box[0] + start_box[3]) / 2 - start_right_mid = (start_box[1] + start_box[2]) / 2 - end_left_mid = (end_box[0] + end_box[3]) / 2 - end_right_mid = (end_box[1] + end_box[2]) / 2 - if not in_contour(contour, start_left_mid): - top_line.insert(0, start_box[0].tolist()) - bot_line.insert(0, start_box[3].tolist()) - elif not in_contour(contour, start_right_mid): - top_line.insert(0, start_box[1].tolist()) - bot_line.insert(0, start_box[2].tolist()) - if not in_contour(contour, end_left_mid): - top_line.append(end_box[0].tolist()) - bot_line.append(end_box[3].tolist()) - elif not in_contour(contour, end_right_mid): - top_line.append(end_box[1].tolist()) - bot_line.append(end_box[2].tolist()) - return top_line, bot_line - - -def comps2boundaries(text_comps, comp_pred_labels): - """Construct text instance boundaries from clustered text components. This - code was partially adapted from https://github.com/GXYM/DRRG licensed under - the MIT license. - - Args: - text_comps (ndarray): The text components. - comp_pred_labels (ndarray): The clustering labels of text components. - - Returns: - boundaries (List[list[float]]): The predicted boundaries of text - instances. - """ - assert text_comps.ndim == 2 - assert len(text_comps) == len(comp_pred_labels) - boundaries = [] - if len(text_comps) < 1: - return boundaries - for cluster_ind in range(0, int(np.max(comp_pred_labels)) + 1): - cluster_comp_inds = np.where(comp_pred_labels == cluster_ind) - text_comp_boxes = text_comps[cluster_comp_inds, :8].reshape( - (-1, 4, 2)).astype(np.int32) - score = np.mean(text_comps[cluster_comp_inds, -1]) - - if text_comp_boxes.shape[0] < 1: - continue - - elif text_comp_boxes.shape[0] > 1: - centers = np.mean( - text_comp_boxes, axis=1).astype(np.int32).tolist() - shortest_path = min_connect_path(centers) - text_comp_boxes = text_comp_boxes[shortest_path] - top_line = np.mean( - text_comp_boxes[:, 0:2, :], axis=1).astype(np.int32).tolist() - bot_line = np.mean( - text_comp_boxes[:, 2:4, :], axis=1).astype(np.int32).tolist() - top_line, bot_line = fix_corner(top_line, bot_line, - text_comp_boxes[0], - text_comp_boxes[-1]) - boundary_points = top_line + bot_line[::-1] - - else: - top_line = text_comp_boxes[0, 0:2, :].astype(np.int32).tolist() - bot_line = text_comp_boxes[0, 2:4:-1, :].astype(np.int32).tolist() - boundary_points = top_line + bot_line - - boundary = [p for coord in boundary_points for p in coord] + [score] - boundaries.append(boundary) - - return boundaries diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/fusers/abi_fuser.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/fusers/abi_fuser.py deleted file mode 100644 index 310cf6f0421ea3575f1935489440f1b37964a194..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/fusers/abi_fuser.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.runner import BaseModule - -from mmocr.models.builder import FUSERS - - -@FUSERS.register_module() -class ABIFuser(BaseModule): - """Mix and align visual feature and linguistic feature Implementation of - language model of `ABINet `_. - - Args: - d_model (int): Hidden size of input. 
- max_seq_len (int): Maximum text sequence length :math:`T`. - num_chars (int): Number of text characters :math:`C`. - init_cfg (dict): Specifies the initialization method for model layers. - """ - - def __init__(self, - d_model=512, - max_seq_len=40, - num_chars=90, - init_cfg=None, - **kwargs): - super().__init__(init_cfg=init_cfg) - - self.max_seq_len = max_seq_len + 1 # additional stop token - self.w_att = nn.Linear(2 * d_model, d_model) - self.cls = nn.Linear(d_model, num_chars) - - def forward(self, l_feature, v_feature): - """ - Args: - l_feature: (N, T, E) where T is length, N is batch size and - d is dim of model. - v_feature: (N, T, E) shape the same as l_feature. - - Returns: - A dict with key ``logits`` - The logits of shape (N, T, C) where N is batch size, T is length - and C is the number of characters. - """ - f = torch.cat((l_feature, v_feature), dim=2) - f_att = torch.sigmoid(self.w_att(f)) - output = f_att * v_feature + (1 - f_att) * l_feature - - logits = self.cls(output) # (N, T, C) - - return {'logits': logits} diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/README.md deleted file mode 100644 index 1cda8fe093a10bd924dc56d580a5f5bfe04db50b..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# NDLOCR用レイアウト認識モジュール - -レイアウト要素を抽出するためのモジュールのリポジトリです。 - -本プログラムは、国立国会図書館が株式会社モルフォAIソリューションズに委託して作成したものです。 - -本プログラムは、国立国会図書館がCC BY 4.0ライセンスで公開するものです。詳細については -[LICENSE](./LICENSE -)をご覧ください。 - -# 環境構築 - -python3.7かつ、cuda 11.1をインストール済みの環境の場合 -ndl_layoutディレクトリ直下で以下のコマンドを実行する。 -``` -pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html -wget https://lab.ndl.go.jp/dataset/ndlocr/ndl_layout/ndl_layout_config.py -P ./models -wget https://lab.ndl.go.jp/dataset/ndlocr/ndl_layout/epoch_140_all_eql_bt.pth -P ./models -``` - -くわえて、元リポジトリ(https://github.com/open-mmlab/mmdetection) -をカスタマイズした[mmdetection](https://github.com/ndl-lab/mmdetection) -に依存しているため、下記のようにリポジトリの追加とインストールを行う。 - -```bash -git clone https://github.com/ndl-lab/mmdetection -cd mmdetection -python setup.py bdist_wheel -pip install dist/*.whl -``` - - -# 使い方 -※スクリプトファイルはndl_layoutディレクトリ直下で実行すること - -## tools/process.py : 推論用モジュール + CLI - -学習結果を使って推論を実行する。学習済みのモデルは`ndl_layout/models` 以下にあるものとする。 - -画像リストを引数で指定するには img_paths オプションを、画像リストをファイルから読み込む場合には list_path オプションを指定する。 - -output_path で出力 XML ファイルの格納先を変更することができる。(デフォルトは layout_prediction.xml) - -use_show オプションを追加すると処理結果をGUI上で確認することができる。 - -img_pathsオプションで画像リストを指定する例 -```bash -python -m tools.process --img_paths image/dir/path/*.jpg --use_show --output_path layout_prediction.xml --config ./models/ndl_layout_config.py --checkpoint ./models/epoch_140_all_eql_bt.pth -``` - -list_path オプションで画像リストを指定する例 -```bash -python -m tools.process --list_path image_list_file.list --use_show --output_path layout_prediction.xml --config ./models/ndl_layout_config.py --checkpoint ./models/epoch_140_all_eql_bt.pth -``` - -## tools/preprocess.py : 学習画像の追加&変換 - -画像のファイル名の変換、縮小を行い、MS COCO 形式に整形。 - -```bash -python -m tools.preprocess images_data_dir output_dir --use_link -``` - -出力解像度を下げる必要がない場合には、`--use_link`オプションを指定する。 - -高解像の場合など、解像度を下げたい場合には `--use_shrink` を使うと画像サイズとアノテーションを半分のサイズに縮小して出力する。 - -本リポジトリの追加学習に使用可能なファイル(アノテーション情報の含まれるjson及び、前処理後の画像)は `output_dir` で指定したディレクトリに出力される。 - - -## 学習時の手順 -1) ndl_layout/tools/preprocess.pyを使用し、NDLOCRXMLDataset形式の画像とアノテーションファイル(xml)をCOCO形式に変換し保存する。 -``` -cd mmdetection -python -m 
tools.preprocess images_data_dir output_dir --use_link -``` -output_dir内に画像のシンボリックリンク(またはコピー)とCOCO形式のアノテーションファイル(.json)を保存する。 - -アノテーションファイルは、data.json(全データのアノテーション)、train.json(ランダムに全体の9割)、test.json(train以外の残る1割)を生成する。 - -2) mmdetection/tools/train_ndl.py を使用し、モデルを学習する。 -``` -cd mmdetection -python tools/train_ndl.py configs/ndl/cascade_rcnn_r50_fpn_1x_ndl_1024_eql.py -``` -学習データ、work directory、初期値、学習回数等はconfigファイル内で指定するか、train_ndl.pyのオプションを使用する。オプションで指定されたものが優先される。 - -work directoryに、学習したモデル(epoch_XX.pth または latest.pth)とconfigファイル(train_ndl.pyのオプションを使用した場合その内容も反映)、学習時のログファイル(.logと.log.json)が保存される。 - -なお、このリポジトリで公開しているモデル(設定ファイルは`configs/ndl/cascade_rcnn_r50_fpn_1x_ndl_1024_eql.py`を参照)の学習時の初期重みには -https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth -を使用した。 diff --git a/spaces/trysem/Vector-diFusion/style.css b/spaces/trysem/Vector-diFusion/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/trysem/Vector-diFusion/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/nvSTFT.py b/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/nvSTFT.py deleted file mode 100644 index 35635c844ea1ae6258112f0ba92e417e81a22642..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/modules/nsf_hifigan/nvSTFT.py +++ /dev/null @@ -1,111 +0,0 @@ -import math -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import random -import torch -import torch.utils.data -import numpy as np -import librosa -from librosa.util import normalize -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read -import soundfile as sf - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile. - except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 48000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. 
return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 48000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - if fmax not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - # print(111,spec) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - # print(222,spec) - spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec) - # print(333,spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - # print(444,spec) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() \ No newline at end of file diff --git a/spaces/ulysses115/diffsvc_test/preprocessing/base_binarizer.py b/spaces/ulysses115/diffsvc_test/preprocessing/base_binarizer.py deleted file mode 100644 index f070584c60c091c3fa4bc1da377733698b0165b6..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/preprocessing/base_binarizer.py +++ /dev/null @@ -1,237 +0,0 @@ -import os -from webbrowser import get -os.environ["OMP_NUM_THREADS"] = "1" -import yaml -from utils.multiprocess_utils import chunked_multiprocess_run -import random -import json -# from resemblyzer import VoiceEncoder -from tqdm import tqdm -from preprocessing.data_gen_utils import get_mel2ph, get_pitch_parselmouth, build_phone_encoder,get_pitch_crepe -from utils.hparams import set_hparams, hparams -import numpy as np -from utils.indexed_datasets import IndexedDatasetBuilder - - -class BinarizationError(Exception): - pass - -BASE_ITEM_ATTRIBUTES = ['txt', 'ph', 'wav_fn', 'tg_fn', 'spk_id'] - -class 
BaseBinarizer: - ''' - Base class for data processing. - 1. *process* and *process_data_split*: - process entire data, generate the train-test split (support parallel processing); - 2. *process_item*: - process singe piece of data; - 3. *get_pitch*: - infer the pitch using some algorithm; - 4. *get_align*: - get the alignment using 'mel2ph' format (see https://arxiv.org/abs/1905.09263). - 5. phoneme encoder, voice encoder, etc. - - Subclasses should define: - 1. *load_metadata*: - how to read multiple datasets from files; - 2. *train_item_names*, *valid_item_names*, *test_item_names*: - how to split the dataset; - 3. load_ph_set: - the phoneme set. - ''' - def __init__(self, item_attributes=BASE_ITEM_ATTRIBUTES): - self.binarization_args = hparams['binarization_args'] - #self.pre_align_args = hparams['pre_align_args'] - - self.items = {} - # every item in self.items has some attributes - self.item_attributes = item_attributes - - self.load_meta_data() - # check program correctness 检查itemdict的key只能在给定的列表中取值 - assert all([attr in self.item_attributes for attr in list(self.items.values())[0].keys()]) - self.item_names = sorted(list(self.items.keys())) - - if self.binarization_args['shuffle']: - random.seed(1234) - random.shuffle(self.item_names) - - # set default get_pitch algorithm - if hparams['use_crepe']: - self.get_pitch_algorithm = get_pitch_crepe - else: - self.get_pitch_algorithm = get_pitch_parselmouth - - def load_meta_data(self): - raise NotImplementedError - - @property - def train_item_names(self): - raise NotImplementedError - - @property - def valid_item_names(self): - raise NotImplementedError - - @property - def test_item_names(self): - raise NotImplementedError - - def build_spk_map(self): - spk_map = set() - for item_name in self.item_names: - spk_name = self.items[item_name]['spk_id'] - spk_map.add(spk_name) - spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))} - assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map) - return spk_map - - def item_name2spk_id(self, item_name): - return self.spk_map[self.items[item_name]['spk_id']] - - def _phone_encoder(self): - ''' - use hubert encoder - ''' - raise NotImplementedError - ''' - create 'phone_set.json' file if it doesn't exist - ''' - ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json" - ph_set = [] - if hparams['reset_phone_dict'] or not os.path.exists(ph_set_fn): - self.load_ph_set(ph_set) - ph_set = sorted(set(ph_set)) - json.dump(ph_set, open(ph_set_fn, 'w', encoding='utf-8')) - print("| Build phone set: ", ph_set) - else: - ph_set = json.load(open(ph_set_fn, 'r', encoding='utf-8')) - print("| Load phone set: ", ph_set) - return build_phone_encoder(hparams['binary_data_dir']) - - - def load_ph_set(self, ph_set): - raise NotImplementedError - - def meta_data_iterator(self, prefix): - if prefix == 'valid': - item_names = self.valid_item_names - elif prefix == 'test': - item_names = self.test_item_names - else: - item_names = self.train_item_names - for item_name in item_names: - meta_data = self.items[item_name] - yield item_name, meta_data - - def process(self): - os.makedirs(hparams['binary_data_dir'], exist_ok=True) - self.spk_map = self.build_spk_map() - print("| spk_map: ", self.spk_map) - spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json" - json.dump(self.spk_map, open(spk_map_fn, 'w', encoding='utf-8')) - - self.phone_encoder =self._phone_encoder() - self.process_data_split('valid') - self.process_data_split('test') - self.process_data_split('train') - - def 
process_data_split(self, prefix): - data_dir = hparams['binary_data_dir'] - args = [] - builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}') - lengths = [] - f0s = [] - total_sec = 0 - # if self.binarization_args['with_spk_embed']: - # voice_encoder = VoiceEncoder().cuda() - - for item_name, meta_data in self.meta_data_iterator(prefix): - args.append([item_name, meta_data, self.binarization_args]) - spec_min=[] - spec_max=[] - # code for single cpu processing - for i in tqdm(reversed(range(len(args))), total=len(args)): - a = args[i] - item = self.process_item(*a) - if item is None: - continue - spec_min.append(item['spec_min']) - spec_max.append(item['spec_max']) - # item['spk_embe'] = voice_encoder.embed_utterance(item['wav']) \ - # if self.binardization_args['with_spk_embed'] else None - if not self.binarization_args['with_wav'] and 'wav' in item: - if hparams['debug']: - print("del wav") - del item['wav'] - if(hparams['debug']): - print(item) - builder.add_item(item) - lengths.append(item['len']) - total_sec += item['sec'] - # if item.get('f0') is not None: - # f0s.append(item['f0']) - if prefix=='train': - spec_max=np.max(spec_max,0) - spec_min=np.min(spec_min,0) - print(spec_max.shape) - with open(hparams['config_path'], encoding='utf-8') as f: - _hparams=yaml.safe_load(f) - _hparams['spec_max']=spec_max.tolist() - _hparams['spec_min']=spec_min.tolist() - with open(hparams['config_path'], 'w', encoding='utf-8') as f: - yaml.safe_dump(_hparams,f) - builder.finalize() - np.save(f'{data_dir}/{prefix}_lengths.npy', lengths) - if len(f0s) > 0: - f0s = np.concatenate(f0s, 0) - f0s = f0s[f0s != 0] - np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()]) - print(f"| {prefix} total duration: {total_sec:.3f}s") - - def process_item(self, item_name, meta_data, binarization_args): - from preprocessing.process_pipeline import File2Batch - return File2Batch.temporary_dict2processed_input(item_name, meta_data, self.phone_encoder, binarization_args) - - def get_align(self, meta_data, mel, phone_encoded, res): - raise NotImplementedError - - def get_align_from_textgrid(self, meta_data, mel, phone_encoded, res): - ''' - NOTE: this part of script is *isolated* from other scripts, which means - it may not be compatible with the current version. - ''' - return - tg_fn, ph = meta_data['tg_fn'], meta_data['ph'] - if tg_fn is not None and os.path.exists(tg_fn): - mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams) - else: - raise BinarizationError(f"Align not found") - if mel2ph.max() - 1 >= len(phone_encoded): - raise BinarizationError( - f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}") - res['mel2ph'] = mel2ph - res['dur'] = dur - - def get_f0cwt(self, f0, res): - ''' - NOTE: this part of script is *isolated* from other scripts, which means - it may not be compatible with the current version. 
- ''' - return - from utils.cwt import get_cont_lf0, get_lf0_cwt - uv, cont_lf0_lpf = get_cont_lf0(f0) - logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf) - cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org - Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm) - if np.any(np.isnan(Wavelet_lf0)): - raise BinarizationError("NaN CWT") - res['cwt_spec'] = Wavelet_lf0 - res['cwt_scales'] = scales - res['f0_mean'] = logf0s_mean_org - res['f0_std'] = logf0s_std_org - - -if __name__ == "__main__": - set_hparams() - BaseBinarizer().process() diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/City Car Driving 2.2.7 Crack The Best Way to Master the Basic Skills of Car Driving.md b/spaces/usbethFlerru/sovits-modelsV2/example/City Car Driving 2.2.7 Crack The Best Way to Master the Basic Skills of Car Driving.md deleted file mode 100644 index cb9c9111d3320e6de31e11c89735ccd2d4a07c06..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/City Car Driving 2.2.7 Crack The Best Way to Master the Basic Skills of Car Driving.md +++ /dev/null @@ -1,5 +0,0 @@ - -

    The driving game "City Car Driving" is a car simulator designed to let users experience driving in a big city, in the countryside, and under different conditions, or simply go for a joy ride. The simulator places particular emphasis on a wide variety of road situations and realistic car driving.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum.py b/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum.py deleted file mode 100644 index 50c188c2475f58572540f615d957f8d28d3f019d..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/extensions/deforum/scripts/deforum.py +++ /dev/null @@ -1,318 +0,0 @@ -# Detach 'deforum_helpers' from 'scripts' to prevent "No module named 'scripts.deforum_helpers'" error -# causing Deforum's tab not show up in some cases when you've might've broken the environment with webui packages updates -import sys, os, shutil - -basedirs = [os.getcwd()] -if 'google.colab' in sys.modules: - basedirs.append('/content/gdrive/MyDrive/sd/stable-diffusion-webui') #hardcode as TheLastBen's colab seems to be the primal source - -for basedir in basedirs: - deforum_paths_to_ensure = [basedir + '/extensions/deforum-for-automatic1111-webui/scripts', basedir + '/extensions/sd-webui-controlnet', basedir + '/extensions/deforum/scripts', basedir + '/scripts/deforum_helpers/src', basedir + '/extensions/deforum/scripts/deforum_helpers/src', basedir +'/extensions/deforum-for-automatic1111-webui/scripts/deforum_helpers/src',basedir] - - for deforum_scripts_path_fix in deforum_paths_to_ensure: - if not deforum_scripts_path_fix in sys.path: - sys.path.extend([deforum_scripts_path_fix]) - -# Main deforum stuff -import deforum_helpers.args as deforum_args -import deforum_helpers.settings as deforum_settings -from deforum_helpers.save_images import dump_frames_cache, reset_frames_cache -from deforum_helpers.frame_interpolation import process_video_interpolation - -import modules.scripts as wscripts -from modules import script_callbacks -import gradio as gr -import json - -from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images -from PIL import Image -from deforum_helpers.video_audio_utilities import ffmpeg_stitch_video, make_gifski_gif -from deforum_helpers.upscaling import make_upscale_v2 -import gc -import torch -from webui import wrap_gradio_gpu_call -import modules.shared as shared -from modules.shared import opts, cmd_opts, state -from modules.ui import create_output_panel, plaintext_to_html, wrap_gradio_call -from types import SimpleNamespace - -def run_deforum(*args, **kwargs): - args_dict = {deforum_args.component_names[i]: args[i+2] for i in range(0, len(deforum_args.component_names))} - p = StableDiffusionProcessingImg2Img( - sd_model=shared.sd_model, - outpath_samples = opts.outdir_samples or opts.outdir_img2img_samples, - outpath_grids = opts.outdir_grids or opts.outdir_img2img_grids, - #we'll setup the rest later - ) - - print("\033[4;33mDeforum extension for auto1111 webui, v2.2b\033[0m") - args_dict['self'] = None - args_dict['p'] = p - - root, args, anim_args, video_args, parseq_args, loop_args, controlnet_args = deforum_args.process_args(args_dict) - root.clipseg_model = None - root.initial_clipskip = opts.data["CLIP_stop_at_last_layers"] - root.basedirs = basedirs - - for basedir in basedirs: - sys.path.extend([ - basedir + '/scripts/deforum_helpers/src', - basedir + '/extensions/deforum/scripts/deforum_helpers/src', - basedir + '/extensions/deforum-for-automatic1111-webui/scripts/deforum_helpers/src', - ]) - - # clean up unused memory - reset_frames_cache(root) - gc.collect() - torch.cuda.empty_cache() - - from deforum_helpers.render import render_animation - from deforum_helpers.render_modes import 
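# Illustrative sketch (hypothetical path shown, not part of the original file): the
# path fix below boils down to
#     sys.path.append('<webui>/extensions/deforum/scripts')
#     import deforum_helpers.args   # now importable as a top-level package
# so the helpers no longer need to be reachable as 'scripts.deforum_helpers'.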
render_input_video, render_animation_with_video_mask, render_interpolation - - tqdm_backup = shared.total_tqdm - shared.total_tqdm = deforum_settings.DeforumTQDM(args, anim_args, parseq_args) - try: - # dispatch to appropriate renderer - if anim_args.animation_mode == '2D' or anim_args.animation_mode == '3D': - if anim_args.use_mask_video: - render_animation_with_video_mask(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root) # allow mask video without an input video - else: - render_animation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root) - elif anim_args.animation_mode == 'Video Input': - render_input_video(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root)#TODO: prettify code - elif anim_args.animation_mode == 'Interpolation': - render_interpolation(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, root.animation_prompts, root) - else: - print('Other modes are not available yet!') - finally: - shared.total_tqdm = tqdm_backup - opts.data["CLIP_stop_at_last_layers"] = root.initial_clipskip - - if video_args.store_frames_in_ram: - dump_frames_cache(root) - - from base64 import b64encode - - real_audio_track = None - if video_args.add_soundtrack != 'None': - real_audio_track = anim_args.video_init_path if video_args.add_soundtrack == 'Init Video' else video_args.soundtrack_path - - # Delete folder with duplicated imgs from OS temp folder - shutil.rmtree(root.tmp_deforum_run_duplicated_folder, ignore_errors=True) - - # Decide whether or not we need to try and frame interpolate laters - need_to_frame_interpolate = False - if video_args.frame_interpolation_engine != "None" and not video_args.skip_video_for_run_all and not video_args.store_frames_in_ram: - need_to_frame_interpolate = True - - if video_args.skip_video_for_run_all: - print('Skipping video creation, uncheck skip_video_for_run_all if you want to run it') - else: - import subprocess - - path_name_modifier = video_args.path_name_modifier - if video_args.render_steps: # render steps from a single image - fname = f"{path_name_modifier}_%05d.png" - all_step_dirs = [os.path.join(args.outdir, d) for d in os.listdir(args.outdir) if os.path.isdir(os.path.join(args.outdir,d))] - newest_dir = max(all_step_dirs, key=os.path.getmtime) - image_path = os.path.join(newest_dir, fname) - print(f"Reading images from {image_path}") - mp4_path = os.path.join(newest_dir, f"{args.timestring}_{path_name_modifier}.mp4") - max_video_frames = args.steps - else: # render images for a video - image_path = os.path.join(args.outdir, f"{args.timestring}_%05d.png") - mp4_path = os.path.join(args.outdir, f"{args.timestring}.mp4") - max_video_frames = anim_args.max_frames - - exclude_keys = deforum_settings.get_keys_to_exclude('video') - video_settings_filename = os.path.join(args.outdir, f"{args.timestring}_video-settings.txt") - with open(video_settings_filename, "w+", encoding="utf-8") as f: - s = {} - for key, value in dict(video_args.__dict__).items(): - if key not in exclude_keys: - s[key] = value - json.dump(s, f, ensure_ascii=False, indent=4) - - # Stitch video using ffmpeg! 
- try: - ffmpeg_stitch_video(ffmpeg_location=video_args.ffmpeg_location, fps=video_args.fps, outmp4_path=mp4_path, stitch_from_frame=0, stitch_to_frame=max_video_frames, imgs_path=image_path, add_soundtrack=video_args.add_soundtrack, audio_path=real_audio_track, crf=video_args.ffmpeg_crf, preset=video_args.ffmpeg_preset) - mp4 = open(mp4_path,'rb').read() - data_url = "data:video/mp4;base64," + b64encode(mp4).decode() - deforum_args.i1_store = f'
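# Hedged sketch of the command ffmpeg_stitch_video assembles (standard ffmpeg flags;
# the exact invocation lives inside the helper, values in <> come from the args above):
#   ffmpeg -y -r <fps> -start_number 0 -i <timestring>_%05d.png \
#          -frames:v <max_video_frames> -c:v libx264 -pix_fmt yuv420p \
#          -crf <ffmpeg_crf> -preset <ffmpeg_preset> <timestring>.mp4
# with an additional audio input muxed in when add_soundtrack is not 'None'.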

    Deforum v0.5-webui-beta

    ' - except Exception as e: - if need_to_frame_interpolate: - print(f"FFMPEG DID NOT STITCH ANY VIDEO. However, you requested to frame interpolate - so we will continue to frame interpolation, but you'll be left only with the interpolated frames and not a video, since ffmpeg couldn't run. Original ffmpeg error: {e}") - else: - print(f"** FFMPEG DID NOT STITCH ANY VIDEO ** Error: {e}") - pass - - if root.initial_info is None: - root.initial_info = "An error has occured and nothing has been generated!" - root.initial_info += "\nPlease, report the bug to https://github.com/deforum-art/deforum-for-automatic1111-webui/issues" - import numpy as np - a = np.random.rand(args.W, args.H, 3)*255 - root.first_frame = Image.fromarray(a.astype('uint8')).convert('RGB') - root.initial_seed = 6934 - # FRAME INTERPOLATION TIME - if need_to_frame_interpolate: - print(f"Got a request to *frame interpolate* using {video_args.frame_interpolation_engine}") - process_video_interpolation(frame_interpolation_engine=video_args.frame_interpolation_engine, frame_interpolation_x_amount=video_args.frame_interpolation_x_amount,frame_interpolation_slow_mo_enabled=video_args.frame_interpolation_slow_mo_enabled, frame_interpolation_slow_mo_amount=video_args.frame_interpolation_slow_mo_amount, orig_vid_fps=video_args.fps, deforum_models_path=root.models_path, real_audio_track=real_audio_track, raw_output_imgs_path=args.outdir, img_batch_id=args.timestring, ffmpeg_location=video_args.ffmpeg_location, ffmpeg_crf=video_args.ffmpeg_crf, ffmpeg_preset=video_args.ffmpeg_preset, keep_interp_imgs=video_args.frame_interpolation_keep_imgs, orig_vid_name=None, resolution=None) - - if video_args.make_gif and not video_args.skip_video_for_run_all and not video_args.store_frames_in_ram: - make_gifski_gif(imgs_raw_path = args.outdir, imgs_batch_id = args.timestring, fps = video_args.fps, models_folder = root.models_path, current_user_os = root.current_user_os) - - # Upscale video once generation is done: - if video_args.r_upscale_video and not video_args.skip_video_for_run_all and not video_args.store_frames_in_ram: - - # out mp4 path is defined in make_upscale func - make_upscale_v2(upscale_factor = video_args.r_upscale_factor, upscale_model = video_args.r_upscale_model, keep_imgs = video_args.r_upscale_keep_imgs, imgs_raw_path = args.outdir, imgs_batch_id = args.timestring, fps = video_args.fps, deforum_models_path = root.models_path, current_user_os = root.current_user_os, ffmpeg_location=video_args.ffmpeg_location, stitch_from_frame=0, stitch_to_frame=max_video_frames, ffmpeg_crf=video_args.ffmpeg_crf, ffmpeg_preset=video_args.ffmpeg_preset, add_soundtrack = video_args.add_soundtrack ,audio_path=real_audio_track) - - root.initial_info += "\n The animation is stored in " + args.outdir - root.initial_info += "\n Timestring = " + args.timestring + '\n' - root.initial_info += "Only the first frame is shown in webui not to clutter the memory" - reset_frames_cache(root) # cleanup the RAM in any case - processed = Processed(p, [root.first_frame], root.initial_seed, root.initial_info) - - if processed is None: - processed = process_images(p) - - shared.total_tqdm.clear() - - generation_info_js = processed.js() - if opts.samples_log_stdout: - print(generation_info_js) - - if opts.do_not_show_images: - processed.images = [] - - return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html('') - -def on_ui_tabs(): - with gr.Blocks(analytics_enabled=False) as deforum_interface: - components = {} - 
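# The tab below follows the usual Gradio Blocks wiring pattern (hedged sketch that
# mirrors the real calls further down rather than reproducing them exactly):
#   btn = gr.Button('Generate')
#   btn.click(fn=wrap_gradio_gpu_call(run_deforum), inputs=[...], outputs=[gallery, info])
# i.e. components are declared inside the Blocks context, events are bound to callbacks,
# and the tab is returned as [(deforum_interface, "Deforum", "deforum_interface")].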
dummy_component = gr.Label(visible=False) - with gr.Row(elem_id='deforum_progress_row').style(equal_height=False): - with gr.Column(scale=1, variant='panel'): - components = deforum_args.setup_deforum_setting_dictionary(None, True, True) - - with gr.Column(scale=1): - with gr.Row(): - btn = gr.Button("Click here after the generation to show the video") - components['btn'] = btn - close_btn = gr.Button("Close the video", visible=False) - with gr.Row(): - i1 = gr.HTML(deforum_args.i1_store, elem_id='deforum_header') - components['i1'] = i1 - # Show video - def show_vid(): - return { - i1: gr.update(value=deforum_args.i1_store, visible=True), - close_btn: gr.update(visible=True), - btn: gr.update(value="Update the video", visible=True), - } - - btn.click( - show_vid, - [], - [i1, close_btn, btn], - ) - # Close video - def close_vid(): - return { - i1: gr.update(value=deforum_args.i1_store_backup, visible=True), - close_btn: gr.update(visible=False), - btn: gr.update(value="Click here after the generation to show the video", visible=True), - } - - close_btn.click( - close_vid, - [], - [i1, close_btn, btn], - ) - id_part = 'deforum' - with gr.Row(elem_id=f"{id_part}_generate_box"): - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", visible=False) - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", visible=True) - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: state.interrupt(), - inputs=[], - outputs=[], - ) - - deforum_gallery, generation_info, html_info, html_log = create_output_panel("deforum", opts.outdir_img2img_samples) - - gr.HTML("

    * Paths can be relative to the webui folder OR full (absolute)

    ") - with gr.Row(): - settings_path = gr.Textbox("deforum_settings.txt", elem_id='deforum_settings_path', label="General Settings File") - #reuse_latest_settings_btn = gr.Button('Reuse Latest', elem_id='deforum_reuse_latest_settings_btn')#TODO - with gr.Row(): - save_settings_btn = gr.Button('Save Settings', elem_id='deforum_save_settings_btn') - load_settings_btn = gr.Button('Load Settings', elem_id='deforum_load_settings_btn') - with gr.Row(): - video_settings_path = gr.Textbox("deforum_video-settings.txt", elem_id='deforum_video_settings_path', label="Video Settings File") - #reuse_latest_video_settings_btn = gr.Button('Reuse Latest', elem_id='deforum_reuse_latest_video_settings_btn')#TODO - with gr.Row(): - save_video_settings_btn = gr.Button('Save Video Settings', elem_id='deforum_save_video_settings_btn') - load_video_settings_btn = gr.Button('Load Video Settings', elem_id='deforum_load_video_settings_btn') - - # components['prompts'].visible = False#hide prompts for the time being - #TODO clean up the code - components['save_sample_per_step'].visible = False - components['show_sample_per_step'].visible = False - components['display_samples'].visible = False - - component_list = [components[name] for name in deforum_args.component_names] - - submit.click( - fn=wrap_gradio_gpu_call(run_deforum, extra_outputs=[None, '', '']), - _js="submit_deforum", - inputs=[dummy_component, dummy_component] + component_list, - outputs=[ - deforum_gallery, - generation_info, - html_info, - html_log, - ], - ) - - settings_component_list = [components[name] for name in deforum_args.settings_component_names] - video_settings_component_list = [components[name] for name in deforum_args.video_args_names] - stuff = gr.HTML("") # wrap gradio call garbage - stuff.visible = False - - save_settings_btn.click( - fn=wrap_gradio_call(deforum_settings.save_settings), - inputs=[settings_path] + settings_component_list, - outputs=[stuff], - ) - - load_settings_btn.click( - fn=wrap_gradio_call(deforum_settings.load_settings), - inputs=[settings_path]+ settings_component_list, - outputs=settings_component_list + [stuff], - ) - - save_video_settings_btn.click( - fn=wrap_gradio_call(deforum_settings.save_video_settings), - inputs=[video_settings_path] + video_settings_component_list, - outputs=[stuff], - ) - - load_video_settings_btn.click( - fn=wrap_gradio_call(deforum_settings.load_video_settings), - inputs=[video_settings_path] + video_settings_component_list, - outputs=video_settings_component_list + [stuff], - ) - - - return [(deforum_interface, "Deforum", "deforum_interface")] - -script_callbacks.on_ui_tabs(on_ui_tabs) diff --git a/spaces/viait/vscode/start_server.sh b/spaces/viait/vscode/start_server.sh deleted file mode 100644 index 5257809d2ea2bcb6ccb3b55473da34eb13982a36..0000000000000000000000000000000000000000 --- a/spaces/viait/vscode/start_server.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -echo "Starting VSCode Server..." 
- -exec /app/openvscode-server/bin/openvscode-server --host 0.0.0.0 --port 7860 --without-connection-token \"${@}\" -- diff --git a/spaces/vivym/image-matting-app/ppmatting/transforms/__init__.py b/spaces/vivym/image-matting-app/ppmatting/transforms/__init__.py deleted file mode 100644 index 7986cdd642998fb0638a81c9ea22615faf8bad0b..0000000000000000000000000000000000000000 --- a/spaces/vivym/image-matting-app/ppmatting/transforms/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .transforms import * diff --git a/spaces/w1zrd/MusicGen/MODEL_CARD.md b/spaces/w1zrd/MusicGen/MODEL_CARD.md deleted file mode 100644 index 6c2c9f883969eb905e74ad3376966d156cc5ca00..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/MODEL_CARD.md +++ /dev/null @@ -1,81 +0,0 @@ -# MusicGen Model Card - -## Model details - -**Organization developing the model:** The FAIR team of Meta AI. - -**Model date:** MusicGen was trained between April 2023 and May 2023. - -**Model version:** This is the version 1 of the model. - -**Model type:** MusicGen consists of an EnCodec model for audio tokenization, an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters ; and two variants: a model trained for text-to-music generation task and a model trained for melody-guided music generation. - -**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv]. - -**Citation details** See [our paper][arxiv] - -**License** Code is released under MIT, model weights are released under CC-BY-NC 4.0. - -**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue. - -## Intended use -**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including: - -- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science -- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs - -**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateur seeking to better understand those models. - -**Out-of-scope use cases** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. 
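As an illustration of the intended research use, text-to-music generation with the publicly documented `audiocraft` API looks roughly like the sketch below; the model name and exact signatures are assumptions and vary across `audiocraft` versions.

```python
# Hypothetical minimal sketch; assumes audiocraft's documented MusicGen interface.
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

model = MusicGen.get_pretrained('small')       # ~300M text-to-music variant
model.set_generation_params(duration=8)        # seconds of audio to generate
wav = model.generate(['lo-fi hip hop beat with warm piano'])  # (batch, channels, samples)
audio_write('sample', wav[0].cpu(), model.sample_rate, strategy='loudness')
```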
- -## Metrics - -**Models performance measures:** We used the following objective measure to evaluate the model on a standard music benchmark: - -- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish) -- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST) -- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model - -Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes: - -- Overall quality of the music samples; -- Text relevance to the provided text input; -- Adherence to the melody for melody-guided music generation. - -More details on performance measures and human studies can be found in the paper. - -**Decision thresholds:** Not applicable. - -## Evaluation datasets - -The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set. - -## Training datasets - -The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing. - -## Quantitative analysis - -More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Experimental Setup section. - -## Limitations and biases - -**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data, we believe that scaling the model on larger datasets can further improve the performance of the model. - -**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs). - -**Limitations:** - -- The model is not able to generate realistic vocals. -- The model has been trained with English descriptions and will not perform as well in other languages. -- The model does not perform equally well for all music styles and cultures. -- The model sometimes generates end of songs, collapsing to silence. -- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results. - -**Biases:** The source of data is potentially lacking diversity and all music cultures are not equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive. - -**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data. 
- -**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks. - -[arxiv]: https://arxiv.org/abs/2306.05284 diff --git a/spaces/wangbinhu/bingo/Dockerfile b/spaces/wangbinhu/bingo/Dockerfile deleted file mode 100644 index c677b05b75f7e4b2beee8c97fb47957a0861a83e..0000000000000000000000000000000000000000 --- a/spaces/wangbinhu/bingo/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM weaigc/bingo:latest - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -CMD npm start diff --git a/spaces/webis-huggingface-workshop/f_demo_question_gen/app.py b/spaces/webis-huggingface-workshop/f_demo_question_gen/app.py deleted file mode 100644 index 6989e825433de6ea07cf397c6ab813e87288b31b..0000000000000000000000000000000000000000 --- a/spaces/webis-huggingface-workshop/f_demo_question_gen/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("huggingface/iarfmoose/t5-base-question-generator").launch(); \ No newline at end of file diff --git a/spaces/weiren119/AudiogramDigitization/src/digitizer/report_components/symbol.py b/spaces/weiren119/AudiogramDigitization/src/digitizer/report_components/symbol.py deleted file mode 100644 index 7e9c72567677cbdba98447b1fa6fc9a14e0686f0..0000000000000000000000000000000000000000 --- a/spaces/weiren119/AudiogramDigitization/src/digitizer/report_components/symbol.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 -""" -Copyright (c) 2020, Carleton University Biomedical Informatics Collaboratory - -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree. -""" - -from typing import List, Optional, Type -import PIL.ImageDraw - -from interfaces import SymbolDict -from .line import Line -from .label import Label -import utils.audiology as Audiology -from utils.geometry import get_bounding_box_relative_to_original_report - -class Symbol(object): - - def __init__(self, symbol_dict: dict, audiogram_coordinates: dict, correction_angle: float): - bbox = symbol_dict["boundingBox"] - self.p1 = { - "x": bbox["x"], - "y": bbox["y"] - } - - self.p2 = { - "x": bbox["x"] + bbox["width"], - "y": bbox["y"] + bbox["height"] - } - - self.dimensions = { - "width": bbox["width"], - "height": bbox["height"] - } - - self.absolute_bounding_box = get_bounding_box_relative_to_original_report(bbox, audiogram_coordinates, correction_angle) - - self.ear = "left" if "left" in symbol_dict["measurementType"].lower() else "right" - self.masking = False if "unmasked" in symbol_dict["measurementType"].lower() else True - self.conduction = "air" if "air" in symbol_dict["measurementType"].lower() else "bone" - self.measurement_type = symbol_dict["measurementType"] - self.confidence = symbol_dict["confidence"] - - def draw(self, canvas: PIL.ImageDraw): - """Draws the symbol's bounding box on the canvas (image) passed. - - Parameters - ---------- - canvas : PIL.ImageDraw - The PIL.ImageDraw on which the symbol is to be displayed. - """ - color = "rgb(255,0,0)" if self.is_frequency() else "rgb(0,0,255)" - canvas.rectangle( - (self.p1["x"], self.p1["y"], self.p2["x"], self.p2["y"]), - outline=color, - width=3 - ) - canvas.text((self.p1["x"], self.p1["y"] - 10), str(self.get_value()), fill=color) - - def get_center(self): - """Returns the center of the symbol's bounding box. 
- - Returns - ------- - dict - A dictionary describing the center of the symbol's bounding box - of the form { "x": int, "y": int }. - """ - center = { - "x": (self.p1["x"] + self.p2["x"]) / 2, - "y": (self.p1["y"] + self.p2["y"]) / 2 - } - return center - - def to_dict(self) -> dict: - """Serializes the symbol to a dictionary. - - Returns - ------- - dict - A dictionary representing the symbol. - """ - return { - "boundingBox": self.absolute_bounding_box, - "ear": self.ear, - "conduction": self.conduction, - "masking": self.masking, - "confidence": self.confidence, - "response": True, - "measurementType": self.measurement_type - } - - def __str__(self): - return f"Threshold(ear={self.ear}, conduction={self.conduction})" - - def __repr__(self): - return self.__str__() diff --git a/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/utils/torch_utils.py b/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/utils/torch_utils.py deleted file mode 100644 index 52a1264bd9fb803bce1007636e05eaf9b2dbb29f..0000000000000000000000000000000000000000 --- a/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/utils/torch_utils.py +++ /dev/null @@ -1,231 +0,0 @@ -import logging -import math -import os -import time -from copy import deepcopy - -import torch -import torch.backends.cudnn as cudnn -import torch.nn as nn -import torch.nn.functional as F -import torchvision.models as models - -logger = logging.getLogger(__name__) - - -def init_seeds(seed=0): - torch.manual_seed(seed) - - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - if seed == 0: # slower, more reproducible - cudnn.deterministic = True - cudnn.benchmark = False - else: # faster, less reproducible - cudnn.deterministic = False - cudnn.benchmark = True - - -def select_device(device='', batch_size=None): - # device = 'cpu' or '0' or '0,1,2,3' - cpu_request = device.lower() == 'cpu' - if device and not cpu_request: # if device requested other than 'cpu' - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity - - cuda = False if cpu_request else torch.cuda.is_available() - if cuda: - c = 1024 ** 2 # bytes to MB - ng = torch.cuda.device_count() - if ng > 1 and batch_size: # check that batch_size is compatible with device_count - assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng) - x = [torch.cuda.get_device_properties(i) for i in range(ng)] - s = 'Using CUDA ' - for i in range(0, ng): - if i == 1: - s = ' ' * len(s) - #logger.info("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" % - #(s, i, x[i].name, x[i].total_memory / c)) - else: - pass - #logger.info('Using CPU') - - #logger.info('') # skip a line - return torch.device('cuda:0' if cuda else 'cpu') - - -def time_synchronized(): - torch.cuda.synchronize() if torch.cuda.is_available() else None - return time.time() - - -def is_parallel(model): - return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) - - -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - -def initialize_weights(model): - for m in model.modules(): - t = type(m) - if t is nn.Conv2d: - pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', 
nonlinearity='relu') - elif t is nn.BatchNorm2d: - m.eps = 1e-3 - m.momentum = 0.03 - elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]: - m.inplace = True - - -def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' - return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] - - -def sparsity(model): - # Return global model sparsity - a, b = 0., 0. - for p in model.parameters(): - a += p.numel() - b += (p == 0).sum() - return b / a - - -def prune(model, amount=0.3): - # Prune model to requested global sparsity - import torch.nn.utils.prune as prune - print('Pruning model... ', end='') - for name, m in model.named_modules(): - if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - print(' %.3g global sparsity' % sparsity(model)) - - -def fuse_conv_and_bn(conv, bn): - # https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - with torch.no_grad(): - # init - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - bias=True).to(conv.weight.device) - - # prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) - - # prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def model_info(model, verbose=False): - # Plots a line-by-line description of a PyTorch model - n_p = sum(x.numel() for x in model.parameters()) # number parameters - n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients - if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) - for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - - try: # FLOPS - from thop import profile - flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2 - fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS - except: - fs = '' - - #logger.info( - # 'Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs)) - - -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = models.__dict__[name](pretrained=True) - - # Display model properties - input_size = [3, 224, 224] - input_space = 'RGB' - input_range = [0, 1] - mean = [0.485, 0.456, 0.406] - std = [0.229, 0.224, 0.225] - for x in ['input_size', 'input_space', 'input_range', 'mean', 'std']: - print(x + ' =', eval(x)) - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - -def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio - # scales img(bs,3,y,x) by ratio - if ratio == 1.0: - return img - else: - h, w = 
img.shape[2:] - s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize - if not same_shape: # pad/crop img - gs = 32 # (pixels) grid size - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] - return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: - continue - else: - setattr(a, k, v) - - -class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. - """ - - def __init__(self, model, decay=0.9999, updates=0): - # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA - self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def update(self, model): - # Update EMA parameters - with torch.no_grad(): - self.updates += 1 - d = self.decay(self.updates) - - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict - for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: - v *= d - v += (1. - d) * msd[k].detach() - - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): - # Update EMA attributes - copy_attr(self.ema, model, include, exclude) diff --git a/spaces/widged/bart-generation/README.md b/spaces/widged/bart-generation/README.md deleted file mode 100644 index d7809fa15a367a7d8617cd7ef321085cd0ef1799..0000000000000000000000000000000000000000 --- a/spaces/widged/bart-generation/README.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Bart Generation -emoji: 😻 -colorFrom: yellow -colorTo: red -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. - -# Warnings - -:WARN: Not my own work. Borrowed or adapted from another space of the same name. 
diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/nasnet.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/nasnet.py deleted file mode 100644 index b1f31def5515c3ba464c86cde471328b50c55b14..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/nasnet.py +++ /dev/null @@ -1,1131 +0,0 @@ -from __future__ import division, absolute_import -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.model_zoo as model_zoo - -__all__ = ['nasnetamobile'] -""" -NASNet Mobile -Thanks to Anastasiia (https://github.com/DagnyT) for the great help, support and motivation! - - ------------------------------------------------------------------------------------- - Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M) ------------------------------------------------------------------------------------- -| NASNet-A (4 @ 1056) | 74.08% | 91.74% | 564 M | 5.3 | ------------------------------------------------------------------------------------- -# References: - - [Learning Transferable Architectures for Scalable Image Recognition] - (https://arxiv.org/abs/1707.07012) -""" -""" -Code imported from https://github.com/Cadene/pretrained-models.pytorch -""" - -pretrained_settings = { - 'nasnetamobile': { - 'imagenet': { - # 'url': 'https://github.com/veronikayurchuk/pretrained-models.pytorch/releases/download/v1.0/nasnetmobile-7e03cead.pth.tar', - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetamobile-7e03cead.pth', - 'input_space': 'RGB', - 'input_size': [3, 224, 224], # resize 256 - 'input_range': [0, 1], - 'mean': [0.5, 0.5, 0.5], - 'std': [0.5, 0.5, 0.5], - 'num_classes': 1000 - }, - # 'imagenet+background': { - # # 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth', - # 'input_space': 'RGB', - # 'input_size': [3, 224, 224], # resize 256 - # 'input_range': [0, 1], - # 'mean': [0.5, 0.5, 0.5], - # 'std': [0.5, 0.5, 0.5], - # 'num_classes': 1001 - # } - } -} - - -class MaxPoolPad(nn.Module): - - def __init__(self): - super(MaxPoolPad, self).__init__() - self.pad = nn.ZeroPad2d((1, 0, 1, 0)) - self.pool = nn.MaxPool2d(3, stride=2, padding=1) - - def forward(self, x): - x = self.pad(x) - x = self.pool(x) - x = x[:, :, 1:, 1:].contiguous() - return x - - -class AvgPoolPad(nn.Module): - - def __init__(self, stride=2, padding=1): - super(AvgPoolPad, self).__init__() - self.pad = nn.ZeroPad2d((1, 0, 1, 0)) - self.pool = nn.AvgPool2d( - 3, stride=stride, padding=padding, count_include_pad=False - ) - - def forward(self, x): - x = self.pad(x) - x = self.pool(x) - x = x[:, :, 1:, 1:].contiguous() - return x - - -class SeparableConv2d(nn.Module): - - def __init__( - self, - in_channels, - out_channels, - dw_kernel, - dw_stride, - dw_padding, - bias=False - ): - super(SeparableConv2d, self).__init__() - self.depthwise_conv2d = nn.Conv2d( - in_channels, - in_channels, - dw_kernel, - stride=dw_stride, - padding=dw_padding, - bias=bias, - groups=in_channels - ) - self.pointwise_conv2d = nn.Conv2d( - in_channels, out_channels, 1, stride=1, bias=bias - ) - - def forward(self, x): - x = self.depthwise_conv2d(x) - x = self.pointwise_conv2d(x) - return x - - -class BranchSeparables(nn.Module): - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride, - padding, - name=None, - bias=False - ): - super(BranchSeparables, self).__init__() - self.relu = nn.ReLU() - self.separable_1 = SeparableConv2d( - in_channels, in_channels, 
kernel_size, stride, padding, bias=bias - ) - self.bn_sep_1 = nn.BatchNorm2d( - in_channels, eps=0.001, momentum=0.1, affine=True - ) - self.relu1 = nn.ReLU() - self.separable_2 = SeparableConv2d( - in_channels, out_channels, kernel_size, 1, padding, bias=bias - ) - self.bn_sep_2 = nn.BatchNorm2d( - out_channels, eps=0.001, momentum=0.1, affine=True - ) - self.name = name - - def forward(self, x): - x = self.relu(x) - if self.name == 'specific': - x = nn.ZeroPad2d((1, 0, 1, 0))(x) - x = self.separable_1(x) - if self.name == 'specific': - x = x[:, :, 1:, 1:].contiguous() - - x = self.bn_sep_1(x) - x = self.relu1(x) - x = self.separable_2(x) - x = self.bn_sep_2(x) - return x - - -class BranchSeparablesStem(nn.Module): - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride, - padding, - bias=False - ): - super(BranchSeparablesStem, self).__init__() - self.relu = nn.ReLU() - self.separable_1 = SeparableConv2d( - in_channels, out_channels, kernel_size, stride, padding, bias=bias - ) - self.bn_sep_1 = nn.BatchNorm2d( - out_channels, eps=0.001, momentum=0.1, affine=True - ) - self.relu1 = nn.ReLU() - self.separable_2 = SeparableConv2d( - out_channels, out_channels, kernel_size, 1, padding, bias=bias - ) - self.bn_sep_2 = nn.BatchNorm2d( - out_channels, eps=0.001, momentum=0.1, affine=True - ) - - def forward(self, x): - x = self.relu(x) - x = self.separable_1(x) - x = self.bn_sep_1(x) - x = self.relu1(x) - x = self.separable_2(x) - x = self.bn_sep_2(x) - return x - - -class BranchSeparablesReduction(BranchSeparables): - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride, - padding, - z_padding=1, - bias=False - ): - BranchSeparables.__init__( - self, in_channels, out_channels, kernel_size, stride, padding, bias - ) - self.padding = nn.ZeroPad2d((z_padding, 0, z_padding, 0)) - - def forward(self, x): - x = self.relu(x) - x = self.padding(x) - x = self.separable_1(x) - x = x[:, :, 1:, 1:].contiguous() - x = self.bn_sep_1(x) - x = self.relu1(x) - x = self.separable_2(x) - x = self.bn_sep_2(x) - return x - - -class CellStem0(nn.Module): - - def __init__(self, stem_filters, num_filters=42): - super(CellStem0, self).__init__() - self.num_filters = num_filters - self.stem_filters = stem_filters - self.conv_1x1 = nn.Sequential() - self.conv_1x1.add_module('relu', nn.ReLU()) - self.conv_1x1.add_module( - 'conv', - nn.Conv2d( - self.stem_filters, self.num_filters, 1, stride=1, bias=False - ) - ) - self.conv_1x1.add_module( - 'bn', - nn.BatchNorm2d( - self.num_filters, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.comb_iter_0_left = BranchSeparables( - self.num_filters, self.num_filters, 5, 2, 2 - ) - self.comb_iter_0_right = BranchSeparablesStem( - self.stem_filters, self.num_filters, 7, 2, 3, bias=False - ) - - self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) - self.comb_iter_1_right = BranchSeparablesStem( - self.stem_filters, self.num_filters, 7, 2, 3, bias=False - ) - - self.comb_iter_2_left = nn.AvgPool2d( - 3, stride=2, padding=1, count_include_pad=False - ) - self.comb_iter_2_right = BranchSeparablesStem( - self.stem_filters, self.num_filters, 5, 2, 2, bias=False - ) - - self.comb_iter_3_right = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_4_left = BranchSeparables( - self.num_filters, self.num_filters, 3, 1, 1, bias=False - ) - self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) - - def forward(self, x): - x1 = self.conv_1x1(x) - - x_comb_iter_0_left = 
self.comb_iter_0_left(x1) - x_comb_iter_0_right = self.comb_iter_0_right(x) - x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right - - x_comb_iter_1_left = self.comb_iter_1_left(x1) - x_comb_iter_1_right = self.comb_iter_1_right(x) - x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right - - x_comb_iter_2_left = self.comb_iter_2_left(x1) - x_comb_iter_2_right = self.comb_iter_2_right(x) - x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right - - x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) - x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 - - x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) - x_comb_iter_4_right = self.comb_iter_4_right(x1) - x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right - - x_out = torch.cat( - [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 - ) - return x_out - - -class CellStem1(nn.Module): - - def __init__(self, stem_filters, num_filters): - super(CellStem1, self).__init__() - self.num_filters = num_filters - self.stem_filters = stem_filters - self.conv_1x1 = nn.Sequential() - self.conv_1x1.add_module('relu', nn.ReLU()) - self.conv_1x1.add_module( - 'conv', - nn.Conv2d( - 2 * self.num_filters, - self.num_filters, - 1, - stride=1, - bias=False - ) - ) - self.conv_1x1.add_module( - 'bn', - nn.BatchNorm2d( - self.num_filters, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.relu = nn.ReLU() - self.path_1 = nn.Sequential() - self.path_1.add_module( - 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) - ) - self.path_1.add_module( - 'conv', - nn.Conv2d( - self.stem_filters, - self.num_filters // 2, - 1, - stride=1, - bias=False - ) - ) - self.path_2 = nn.ModuleList() - self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1))) - self.path_2.add_module( - 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) - ) - self.path_2.add_module( - 'conv', - nn.Conv2d( - self.stem_filters, - self.num_filters // 2, - 1, - stride=1, - bias=False - ) - ) - - self.final_path_bn = nn.BatchNorm2d( - self.num_filters, eps=0.001, momentum=0.1, affine=True - ) - - self.comb_iter_0_left = BranchSeparables( - self.num_filters, - self.num_filters, - 5, - 2, - 2, - name='specific', - bias=False - ) - self.comb_iter_0_right = BranchSeparables( - self.num_filters, - self.num_filters, - 7, - 2, - 3, - name='specific', - bias=False - ) - - # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) - self.comb_iter_1_left = MaxPoolPad() - self.comb_iter_1_right = BranchSeparables( - self.num_filters, - self.num_filters, - 7, - 2, - 3, - name='specific', - bias=False - ) - - # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) - self.comb_iter_2_left = AvgPoolPad() - self.comb_iter_2_right = BranchSeparables( - self.num_filters, - self.num_filters, - 5, - 2, - 2, - name='specific', - bias=False - ) - - self.comb_iter_3_right = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_4_left = BranchSeparables( - self.num_filters, - self.num_filters, - 3, - 1, - 1, - name='specific', - bias=False - ) - # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) - self.comb_iter_4_right = MaxPoolPad() - - def forward(self, x_conv0, x_stem_0): - x_left = self.conv_1x1(x_stem_0) - - x_relu = self.relu(x_conv0) - # path 1 - x_path1 = self.path_1(x_relu) - # path 2 - x_path2 = self.path_2.pad(x_relu) - x_path2 = x_path2[:, :, 1:, 1:] - x_path2 = self.path_2.avgpool(x_path2) - x_path2 = self.path_2.conv(x_path2) - # final path - x_right = 
self.final_path_bn(torch.cat([x_path1, x_path2], 1)) - - x_comb_iter_0_left = self.comb_iter_0_left(x_left) - x_comb_iter_0_right = self.comb_iter_0_right(x_right) - x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right - - x_comb_iter_1_left = self.comb_iter_1_left(x_left) - x_comb_iter_1_right = self.comb_iter_1_right(x_right) - x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right - - x_comb_iter_2_left = self.comb_iter_2_left(x_left) - x_comb_iter_2_right = self.comb_iter_2_right(x_right) - x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right - - x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) - x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 - - x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) - x_comb_iter_4_right = self.comb_iter_4_right(x_left) - x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right - - x_out = torch.cat( - [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 - ) - return x_out - - -class FirstCell(nn.Module): - - def __init__( - self, in_channels_left, out_channels_left, in_channels_right, - out_channels_right - ): - super(FirstCell, self).__init__() - self.conv_1x1 = nn.Sequential() - self.conv_1x1.add_module('relu', nn.ReLU()) - self.conv_1x1.add_module( - 'conv', - nn.Conv2d( - in_channels_right, out_channels_right, 1, stride=1, bias=False - ) - ) - self.conv_1x1.add_module( - 'bn', - nn.BatchNorm2d( - out_channels_right, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.relu = nn.ReLU() - self.path_1 = nn.Sequential() - self.path_1.add_module( - 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) - ) - self.path_1.add_module( - 'conv', - nn.Conv2d( - in_channels_left, out_channels_left, 1, stride=1, bias=False - ) - ) - self.path_2 = nn.ModuleList() - self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1))) - self.path_2.add_module( - 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) - ) - self.path_2.add_module( - 'conv', - nn.Conv2d( - in_channels_left, out_channels_left, 1, stride=1, bias=False - ) - ) - - self.final_path_bn = nn.BatchNorm2d( - out_channels_left * 2, eps=0.001, momentum=0.1, affine=True - ) - - self.comb_iter_0_left = BranchSeparables( - out_channels_right, out_channels_right, 5, 1, 2, bias=False - ) - self.comb_iter_0_right = BranchSeparables( - out_channels_right, out_channels_right, 3, 1, 1, bias=False - ) - - self.comb_iter_1_left = BranchSeparables( - out_channels_right, out_channels_right, 5, 1, 2, bias=False - ) - self.comb_iter_1_right = BranchSeparables( - out_channels_right, out_channels_right, 3, 1, 1, bias=False - ) - - self.comb_iter_2_left = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_3_left = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - self.comb_iter_3_right = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_4_left = BranchSeparables( - out_channels_right, out_channels_right, 3, 1, 1, bias=False - ) - - def forward(self, x, x_prev): - x_relu = self.relu(x_prev) - # path 1 - x_path1 = self.path_1(x_relu) - # path 2 - x_path2 = self.path_2.pad(x_relu) - x_path2 = x_path2[:, :, 1:, 1:] - x_path2 = self.path_2.avgpool(x_path2) - x_path2 = self.path_2.conv(x_path2) - # final path - x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) - - x_right = self.conv_1x1(x) - - x_comb_iter_0_left = self.comb_iter_0_left(x_right) - x_comb_iter_0_right = self.comb_iter_0_right(x_left) - x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right - - 
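All of the branches in these cells are built from the `SeparableConv2d` block defined earlier in this file: a depthwise convolution followed by a 1x1 pointwise convolution. The following quick check, which is not part of the original file and uses arbitrary channel sizes, shows why that factorization is used: the output shape matches a dense convolution while the weight count drops sharply.

```python
# Illustrative check, not part of the original file: depthwise + 1x1 pointwise
# matches the output shape of a dense convolution with far fewer weights.
import torch
import torch.nn as nn

in_c, out_c, k = 44, 44, 5
regular = nn.Conv2d(in_c, out_c, k, padding=2, bias=False)
depthwise = nn.Conv2d(in_c, in_c, k, padding=2, groups=in_c, bias=False)
pointwise = nn.Conv2d(in_c, out_c, 1, bias=False)

x = torch.randn(1, in_c, 32, 32)
assert regular(x).shape == pointwise(depthwise(x)).shape

n_regular = sum(p.numel() for p in regular.parameters())
n_separable = sum(p.numel() for p in depthwise.parameters()) + \
              sum(p.numel() for p in pointwise.parameters())
print(n_regular, n_separable)  # 48400 vs 3036 with these sizes
```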
x_comb_iter_1_left = self.comb_iter_1_left(x_left) - x_comb_iter_1_right = self.comb_iter_1_right(x_left) - x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right - - x_comb_iter_2_left = self.comb_iter_2_left(x_right) - x_comb_iter_2 = x_comb_iter_2_left + x_left - - x_comb_iter_3_left = self.comb_iter_3_left(x_left) - x_comb_iter_3_right = self.comb_iter_3_right(x_left) - x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right - - x_comb_iter_4_left = self.comb_iter_4_left(x_right) - x_comb_iter_4 = x_comb_iter_4_left + x_right - - x_out = torch.cat( - [ - x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, - x_comb_iter_3, x_comb_iter_4 - ], 1 - ) - return x_out - - -class NormalCell(nn.Module): - - def __init__( - self, in_channels_left, out_channels_left, in_channels_right, - out_channels_right - ): - super(NormalCell, self).__init__() - self.conv_prev_1x1 = nn.Sequential() - self.conv_prev_1x1.add_module('relu', nn.ReLU()) - self.conv_prev_1x1.add_module( - 'conv', - nn.Conv2d( - in_channels_left, out_channels_left, 1, stride=1, bias=False - ) - ) - self.conv_prev_1x1.add_module( - 'bn', - nn.BatchNorm2d( - out_channels_left, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.conv_1x1 = nn.Sequential() - self.conv_1x1.add_module('relu', nn.ReLU()) - self.conv_1x1.add_module( - 'conv', - nn.Conv2d( - in_channels_right, out_channels_right, 1, stride=1, bias=False - ) - ) - self.conv_1x1.add_module( - 'bn', - nn.BatchNorm2d( - out_channels_right, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.comb_iter_0_left = BranchSeparables( - out_channels_right, out_channels_right, 5, 1, 2, bias=False - ) - self.comb_iter_0_right = BranchSeparables( - out_channels_left, out_channels_left, 3, 1, 1, bias=False - ) - - self.comb_iter_1_left = BranchSeparables( - out_channels_left, out_channels_left, 5, 1, 2, bias=False - ) - self.comb_iter_1_right = BranchSeparables( - out_channels_left, out_channels_left, 3, 1, 1, bias=False - ) - - self.comb_iter_2_left = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_3_left = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - self.comb_iter_3_right = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_4_left = BranchSeparables( - out_channels_right, out_channels_right, 3, 1, 1, bias=False - ) - - def forward(self, x, x_prev): - x_left = self.conv_prev_1x1(x_prev) - x_right = self.conv_1x1(x) - - x_comb_iter_0_left = self.comb_iter_0_left(x_right) - x_comb_iter_0_right = self.comb_iter_0_right(x_left) - x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right - - x_comb_iter_1_left = self.comb_iter_1_left(x_left) - x_comb_iter_1_right = self.comb_iter_1_right(x_left) - x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right - - x_comb_iter_2_left = self.comb_iter_2_left(x_right) - x_comb_iter_2 = x_comb_iter_2_left + x_left - - x_comb_iter_3_left = self.comb_iter_3_left(x_left) - x_comb_iter_3_right = self.comb_iter_3_right(x_left) - x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right - - x_comb_iter_4_left = self.comb_iter_4_left(x_right) - x_comb_iter_4 = x_comb_iter_4_left + x_right - - x_out = torch.cat( - [ - x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, - x_comb_iter_3, x_comb_iter_4 - ], 1 - ) - return x_out - - -class ReductionCell0(nn.Module): - - def __init__( - self, in_channels_left, out_channels_left, in_channels_right, - out_channels_right - ): - super(ReductionCell0, self).__init__() - self.conv_prev_1x1 = nn.Sequential() - 
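The cells all share the same wiring: each `comb_iter` adds a left and a right branch output, and the cell concatenates several of those sums (plus the left input in the normal cells) along the channel axis. That concatenation is where the `6 * filters` / `12 * filters` channel bookkeeping in `NASNetAMobile` further down comes from. A toy shape check of the pattern, with the branch operations replaced by trivial stand-ins:

```python
# Toy stand-in for the cell wiring: branch ops are placeholders, shapes are arbitrary.
import torch

filters = 8
x_left = torch.randn(1, filters, 16, 16)
x_right = torch.randn(1, filters, 16, 16)

comb_iters = []
for _ in range(5):
    left_branch = x_left * 0.5    # placeholder for BranchSeparables / pooling branches
    right_branch = x_right * 0.5
    comb_iters.append(left_branch + right_branch)

x_out = torch.cat([x_left] + comb_iters, dim=1)
print(x_out.shape)  # torch.Size([1, 48, 16, 16]) -> 6 * filters channels
```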
self.conv_prev_1x1.add_module('relu', nn.ReLU()) - self.conv_prev_1x1.add_module( - 'conv', - nn.Conv2d( - in_channels_left, out_channels_left, 1, stride=1, bias=False - ) - ) - self.conv_prev_1x1.add_module( - 'bn', - nn.BatchNorm2d( - out_channels_left, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.conv_1x1 = nn.Sequential() - self.conv_1x1.add_module('relu', nn.ReLU()) - self.conv_1x1.add_module( - 'conv', - nn.Conv2d( - in_channels_right, out_channels_right, 1, stride=1, bias=False - ) - ) - self.conv_1x1.add_module( - 'bn', - nn.BatchNorm2d( - out_channels_right, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.comb_iter_0_left = BranchSeparablesReduction( - out_channels_right, out_channels_right, 5, 2, 2, bias=False - ) - self.comb_iter_0_right = BranchSeparablesReduction( - out_channels_right, out_channels_right, 7, 2, 3, bias=False - ) - - self.comb_iter_1_left = MaxPoolPad() - self.comb_iter_1_right = BranchSeparablesReduction( - out_channels_right, out_channels_right, 7, 2, 3, bias=False - ) - - self.comb_iter_2_left = AvgPoolPad() - self.comb_iter_2_right = BranchSeparablesReduction( - out_channels_right, out_channels_right, 5, 2, 2, bias=False - ) - - self.comb_iter_3_right = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_4_left = BranchSeparablesReduction( - out_channels_right, out_channels_right, 3, 1, 1, bias=False - ) - self.comb_iter_4_right = MaxPoolPad() - - def forward(self, x, x_prev): - x_left = self.conv_prev_1x1(x_prev) - x_right = self.conv_1x1(x) - - x_comb_iter_0_left = self.comb_iter_0_left(x_right) - x_comb_iter_0_right = self.comb_iter_0_right(x_left) - x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right - - x_comb_iter_1_left = self.comb_iter_1_left(x_right) - x_comb_iter_1_right = self.comb_iter_1_right(x_left) - x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right - - x_comb_iter_2_left = self.comb_iter_2_left(x_right) - x_comb_iter_2_right = self.comb_iter_2_right(x_left) - x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right - - x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) - x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 - - x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) - x_comb_iter_4_right = self.comb_iter_4_right(x_right) - x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right - - x_out = torch.cat( - [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 - ) - return x_out - - -class ReductionCell1(nn.Module): - - def __init__( - self, in_channels_left, out_channels_left, in_channels_right, - out_channels_right - ): - super(ReductionCell1, self).__init__() - self.conv_prev_1x1 = nn.Sequential() - self.conv_prev_1x1.add_module('relu', nn.ReLU()) - self.conv_prev_1x1.add_module( - 'conv', - nn.Conv2d( - in_channels_left, out_channels_left, 1, stride=1, bias=False - ) - ) - self.conv_prev_1x1.add_module( - 'bn', - nn.BatchNorm2d( - out_channels_left, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.conv_1x1 = nn.Sequential() - self.conv_1x1.add_module('relu', nn.ReLU()) - self.conv_1x1.add_module( - 'conv', - nn.Conv2d( - in_channels_right, out_channels_right, 1, stride=1, bias=False - ) - ) - self.conv_1x1.add_module( - 'bn', - nn.BatchNorm2d( - out_channels_right, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.comb_iter_0_left = BranchSeparables( - out_channels_right, - out_channels_right, - 5, - 2, - 2, - name='specific', - bias=False - ) - self.comb_iter_0_right = BranchSeparables( - out_channels_right, - out_channels_right, - 7, 
- 2, - 3, - name='specific', - bias=False - ) - - # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) - self.comb_iter_1_left = MaxPoolPad() - self.comb_iter_1_right = BranchSeparables( - out_channels_right, - out_channels_right, - 7, - 2, - 3, - name='specific', - bias=False - ) - - # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) - self.comb_iter_2_left = AvgPoolPad() - self.comb_iter_2_right = BranchSeparables( - out_channels_right, - out_channels_right, - 5, - 2, - 2, - name='specific', - bias=False - ) - - self.comb_iter_3_right = nn.AvgPool2d( - 3, stride=1, padding=1, count_include_pad=False - ) - - self.comb_iter_4_left = BranchSeparables( - out_channels_right, - out_channels_right, - 3, - 1, - 1, - name='specific', - bias=False - ) - # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) - self.comb_iter_4_right = MaxPoolPad() - - def forward(self, x, x_prev): - x_left = self.conv_prev_1x1(x_prev) - x_right = self.conv_1x1(x) - - x_comb_iter_0_left = self.comb_iter_0_left(x_right) - x_comb_iter_0_right = self.comb_iter_0_right(x_left) - x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right - - x_comb_iter_1_left = self.comb_iter_1_left(x_right) - x_comb_iter_1_right = self.comb_iter_1_right(x_left) - x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right - - x_comb_iter_2_left = self.comb_iter_2_left(x_right) - x_comb_iter_2_right = self.comb_iter_2_right(x_left) - x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right - - x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) - x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 - - x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) - x_comb_iter_4_right = self.comb_iter_4_right(x_right) - x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right - - x_out = torch.cat( - [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 - ) - return x_out - - -class NASNetAMobile(nn.Module): - """Neural Architecture Search (NAS). - - Reference: - Zoph et al. Learning Transferable Architectures - for Scalable Image Recognition. CVPR 2018. - - Public keys: - - ``nasnetamobile``: NASNet-A Mobile. 
- """ - - def __init__( - self, - num_classes, - loss, - stem_filters=32, - penultimate_filters=1056, - filters_multiplier=2, - **kwargs - ): - super(NASNetAMobile, self).__init__() - self.stem_filters = stem_filters - self.penultimate_filters = penultimate_filters - self.filters_multiplier = filters_multiplier - self.loss = loss - - filters = self.penultimate_filters // 24 - # 24 is default value for the architecture - - self.conv0 = nn.Sequential() - self.conv0.add_module( - 'conv', - nn.Conv2d( - in_channels=3, - out_channels=self.stem_filters, - kernel_size=3, - padding=0, - stride=2, - bias=False - ) - ) - self.conv0.add_module( - 'bn', - nn.BatchNorm2d( - self.stem_filters, eps=0.001, momentum=0.1, affine=True - ) - ) - - self.cell_stem_0 = CellStem0( - self.stem_filters, num_filters=filters // (filters_multiplier**2) - ) - self.cell_stem_1 = CellStem1( - self.stem_filters, num_filters=filters // filters_multiplier - ) - - self.cell_0 = FirstCell( - in_channels_left=filters, - out_channels_left=filters // 2, # 1, 0.5 - in_channels_right=2 * filters, - out_channels_right=filters - ) # 2, 1 - self.cell_1 = NormalCell( - in_channels_left=2 * filters, - out_channels_left=filters, # 2, 1 - in_channels_right=6 * filters, - out_channels_right=filters - ) # 6, 1 - self.cell_2 = NormalCell( - in_channels_left=6 * filters, - out_channels_left=filters, # 6, 1 - in_channels_right=6 * filters, - out_channels_right=filters - ) # 6, 1 - self.cell_3 = NormalCell( - in_channels_left=6 * filters, - out_channels_left=filters, # 6, 1 - in_channels_right=6 * filters, - out_channels_right=filters - ) # 6, 1 - - self.reduction_cell_0 = ReductionCell0( - in_channels_left=6 * filters, - out_channels_left=2 * filters, # 6, 2 - in_channels_right=6 * filters, - out_channels_right=2 * filters - ) # 6, 2 - - self.cell_6 = FirstCell( - in_channels_left=6 * filters, - out_channels_left=filters, # 6, 1 - in_channels_right=8 * filters, - out_channels_right=2 * filters - ) # 8, 2 - self.cell_7 = NormalCell( - in_channels_left=8 * filters, - out_channels_left=2 * filters, # 8, 2 - in_channels_right=12 * filters, - out_channels_right=2 * filters - ) # 12, 2 - self.cell_8 = NormalCell( - in_channels_left=12 * filters, - out_channels_left=2 * filters, # 12, 2 - in_channels_right=12 * filters, - out_channels_right=2 * filters - ) # 12, 2 - self.cell_9 = NormalCell( - in_channels_left=12 * filters, - out_channels_left=2 * filters, # 12, 2 - in_channels_right=12 * filters, - out_channels_right=2 * filters - ) # 12, 2 - - self.reduction_cell_1 = ReductionCell1( - in_channels_left=12 * filters, - out_channels_left=4 * filters, # 12, 4 - in_channels_right=12 * filters, - out_channels_right=4 * filters - ) # 12, 4 - - self.cell_12 = FirstCell( - in_channels_left=12 * filters, - out_channels_left=2 * filters, # 12, 2 - in_channels_right=16 * filters, - out_channels_right=4 * filters - ) # 16, 4 - self.cell_13 = NormalCell( - in_channels_left=16 * filters, - out_channels_left=4 * filters, # 16, 4 - in_channels_right=24 * filters, - out_channels_right=4 * filters - ) # 24, 4 - self.cell_14 = NormalCell( - in_channels_left=24 * filters, - out_channels_left=4 * filters, # 24, 4 - in_channels_right=24 * filters, - out_channels_right=4 * filters - ) # 24, 4 - self.cell_15 = NormalCell( - in_channels_left=24 * filters, - out_channels_left=4 * filters, # 24, 4 - in_channels_right=24 * filters, - out_channels_right=4 * filters - ) # 24, 4 - - self.relu = nn.ReLU() - self.dropout = nn.Dropout() - self.classifier = nn.Linear(24 * filters, 
num_classes) - - self._init_params() - - def _init_params(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu' - ) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def features(self, input): - x_conv0 = self.conv0(input) - x_stem_0 = self.cell_stem_0(x_conv0) - x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) - - x_cell_0 = self.cell_0(x_stem_1, x_stem_0) - x_cell_1 = self.cell_1(x_cell_0, x_stem_1) - x_cell_2 = self.cell_2(x_cell_1, x_cell_0) - x_cell_3 = self.cell_3(x_cell_2, x_cell_1) - - x_reduction_cell_0 = self.reduction_cell_0(x_cell_3, x_cell_2) - - x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_3) - x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) - x_cell_8 = self.cell_8(x_cell_7, x_cell_6) - x_cell_9 = self.cell_9(x_cell_8, x_cell_7) - - x_reduction_cell_1 = self.reduction_cell_1(x_cell_9, x_cell_8) - - x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_9) - x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) - x_cell_14 = self.cell_14(x_cell_13, x_cell_12) - x_cell_15 = self.cell_15(x_cell_14, x_cell_13) - - x_cell_15 = self.relu(x_cell_15) - x_cell_15 = F.avg_pool2d( - x_cell_15, - x_cell_15.size()[2:] - ) # global average pool - x_cell_15 = x_cell_15.view(x_cell_15.size(0), -1) - x_cell_15 = self.dropout(x_cell_15) - - return x_cell_15 - - def forward(self, input): - v = self.features(input) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError('Unsupported loss: {}'.format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. 
- """ - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def nasnetamobile(num_classes, loss='softmax', pretrained=True, **kwargs): - model = NASNetAMobile(num_classes, loss, **kwargs) - if pretrained: - model_url = pretrained_settings['nasnetamobile']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/metrics/rank_cylib/Makefile b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/metrics/rank_cylib/Makefile deleted file mode 100644 index d49e655f85f829cb8ccda5bad6fe2c65cccf2bf2..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/metrics/rank_cylib/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -all: - $(PYTHON) setup.py build_ext --inplace - rm -rf build -clean: - rm -rf build - rm -f rank_cy.c *.so \ No newline at end of file diff --git a/spaces/xswu/HPSv2/src/open_clip/constants.py b/spaces/xswu/HPSv2/src/open_clip/constants.py deleted file mode 100644 index a670bb3fab442baeb9af53b91c312e6982af57ee..0000000000000000000000000000000000000000 --- a/spaces/xswu/HPSv2/src/open_clip/constants.py +++ /dev/null @@ -1,2 +0,0 @@ -OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073) -OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711) diff --git a/spaces/yaoshining/text-generation-webui/modules/models_settings.py b/spaces/yaoshining/text-generation-webui/modules/models_settings.py deleted file mode 100644 index 0207e7de76e54f438ee98d3b4e8344446796dd47..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/modules/models_settings.py +++ /dev/null @@ -1,134 +0,0 @@ -import re -from pathlib import Path - -import yaml - -from modules import shared, ui - - -def get_model_settings_from_yamls(model): - settings = shared.model_config - model_settings = {} - for pat in settings: - if re.match(pat.lower(), model.lower()): - for k in settings[pat]: - model_settings[k] = settings[pat][k] - - return model_settings - - -def infer_loader(model_name): - path_to_model = Path(f'{shared.args.model_dir}/{model_name}') - model_settings = get_model_settings_from_yamls(model_name) - if not path_to_model.exists(): - loader = None - elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0): - loader = 'AutoGPTQ' - elif len(list(path_to_model.glob('*ggml*.bin'))) > 0: - loader = 'llama.cpp' - elif re.match('.*ggml.*\.bin', model_name.lower()): - loader = 'llama.cpp' - elif re.match('.*rwkv.*\.pth', model_name.lower()): - loader = 'RWKV' - elif shared.args.flexgen: - loader = 'FlexGen' - else: - loader = 'Transformers' - - return loader - - -# UI: update the command-line arguments based on the interface values -def update_model_parameters(state, initial=False): - elements = ui.list_model_elements() # the names of the parameters - gpu_memories = [] - - for i, element in enumerate(elements): - if element not in state: - continue - - value = state[element] - if element.startswith('gpu_memory'): - gpu_memories.append(value) - continue - - if initial and vars(shared.args)[element] != vars(shared.args_defaults)[element]: - continue - - # Setting null 
defaults - if element in ['wbits', 'groupsize', 'model_type'] and value == 'None': - value = vars(shared.args_defaults)[element] - elif element in ['cpu_memory'] and value == 0: - value = vars(shared.args_defaults)[element] - - # Making some simple conversions - if element in ['wbits', 'groupsize', 'pre_layer']: - value = int(value) - elif element == 'cpu_memory' and value is not None: - value = f"{value}MiB" - - if element in ['pre_layer']: - value = [value] if value > 0 else None - - setattr(shared.args, element, value) - - found_positive = False - for i in gpu_memories: - if i > 0: - found_positive = True - break - - if not (initial and vars(shared.args)['gpu_memory'] != vars(shared.args_defaults)['gpu_memory']): - if found_positive: - shared.args.gpu_memory = [f"{i}MiB" for i in gpu_memories] - else: - shared.args.gpu_memory = None - - -# UI: update the state variable with the model settings -def apply_model_settings_to_state(model, state): - model_settings = get_model_settings_from_yamls(model) - if 'loader' not in model_settings: - loader = infer_loader(model) - if 'wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0: - loader = 'AutoGPTQ' - - # If the user is using an alternative GPTQ loader, let them keep using it - if not (loader == 'AutoGPTQ' and state['loader'] in ['GPTQ-for-LLaMa', 'ExLlama', 'ExLlama_HF']): - state['loader'] = loader - - for k in model_settings: - if k in state: - state[k] = model_settings[k] - - return state - - -# Save the settings for this model to models/config-user.yaml -def save_model_settings(model, state): - if model == 'None': - yield ("Not saving the settings because no model is loaded.") - return - - with Path(f'{shared.args.model_dir}/config-user.yaml') as p: - if p.exists(): - user_config = yaml.safe_load(open(p, 'r').read()) - else: - user_config = {} - - model_regex = model + '$' # For exact matches - for _dict in [user_config, shared.model_config]: - if model_regex not in _dict: - _dict[model_regex] = {} - - if model_regex not in user_config: - user_config[model_regex] = {} - - for k in ui.list_model_elements(): - user_config[model_regex][k] = state[k] - shared.model_config[model_regex][k] = state[k] - - with open(p, 'w') as f: - f.write(yaml.dump(user_config, sort_keys=False)) - - yield (f"Settings for {model} saved to {p}") diff --git a/spaces/ybelkada/interfacegan_pp/models/pggan_tf_official/tfutil.py b/spaces/ybelkada/interfacegan_pp/models/pggan_tf_official/tfutil.py deleted file mode 100644 index cf7ad0ada400aae935759190a6384c5dd8a3fc08..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/pggan_tf_official/tfutil.py +++ /dev/null @@ -1,749 +0,0 @@ -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -import os -import sys -import inspect -import importlib -import imp -import numpy as np -from collections import OrderedDict -import tensorflow as tf - -#---------------------------------------------------------------------------- -# Convenience. - -def run(*args, **kwargs): # Run the specified ops in the default session. 
- return tf.get_default_session().run(*args, **kwargs) - -def is_tf_expression(x): - return isinstance(x, tf.Tensor) or isinstance(x, tf.Variable) or isinstance(x, tf.Operation) - -def shape_to_list(shape): - return [dim.value for dim in shape] - -def flatten(x): - with tf.name_scope('Flatten'): - return tf.reshape(x, [-1]) - -def log2(x): - with tf.name_scope('Log2'): - return tf.log(x) * np.float32(1.0 / np.log(2.0)) - -def exp2(x): - with tf.name_scope('Exp2'): - return tf.exp(x * np.float32(np.log(2.0))) - -def lerp(a, b, t): - with tf.name_scope('Lerp'): - return a + (b - a) * t - -def lerp_clip(a, b, t): - with tf.name_scope('LerpClip'): - return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) - -def absolute_name_scope(scope): # Forcefully enter the specified name scope, ignoring any surrounding scopes. - return tf.name_scope(scope + '/') - -#---------------------------------------------------------------------------- -# Initialize TensorFlow graph and session using good default settings. - -def init_tf(config_dict=dict()): - if tf.get_default_session() is None: - tf.set_random_seed(np.random.randint(1 << 31)) - create_session(config_dict, force_as_default=True) - -#---------------------------------------------------------------------------- -# Create tf.Session based on config dict of the form -# {'gpu_options.allow_growth': True} - -def create_session(config_dict=dict(), force_as_default=False): - config = tf.ConfigProto() - for key, value in config_dict.items(): - fields = key.split('.') - obj = config - for field in fields[:-1]: - obj = getattr(obj, field) - setattr(obj, fields[-1], value) - session = tf.Session(config=config) - if force_as_default: - session._default_session = session.as_default() - session._default_session.enforce_nesting = False - session._default_session.__enter__() - return session - -#---------------------------------------------------------------------------- -# Initialize all tf.Variables that have not already been initialized. -# Equivalent to the following, but more efficient and does not bloat the tf graph: -# tf.variables_initializer(tf.report_unitialized_variables()).run() - -def init_uninited_vars(vars=None): - if vars is None: vars = tf.global_variables() - test_vars = []; test_ops = [] - with tf.control_dependencies(None): # ignore surrounding control_dependencies - for var in vars: - assert is_tf_expression(var) - try: - tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0')) - except KeyError: - # Op does not exist => variable may be uninitialized. - test_vars.append(var) - with absolute_name_scope(var.name.split(':')[0]): - test_ops.append(tf.is_variable_initialized(var)) - init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited] - run([var.initializer for var in init_vars]) - -#---------------------------------------------------------------------------- -# Set the values of given tf.Variables. 
-# Equivalent to the following, but more efficient and does not bloat the tf graph: -# tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()] - -def set_vars(var_to_value_dict): - ops = [] - feed_dict = {} - for var, value in var_to_value_dict.items(): - assert is_tf_expression(var) - try: - setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/setter:0')) # look for existing op - except KeyError: - with absolute_name_scope(var.name.split(':')[0]): - with tf.control_dependencies(None): # ignore surrounding control_dependencies - setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter') # create new setter - ops.append(setter) - feed_dict[setter.op.inputs[1]] = value - run(ops, feed_dict) - -#---------------------------------------------------------------------------- -# Autosummary creates an identity op that internally keeps track of the input -# values and automatically shows up in TensorBoard. The reported value -# represents an average over input components. The average is accumulated -# constantly over time and flushed when save_summaries() is called. -# -# Notes: -# - The output tensor must be used as an input for something else in the -# graph. Otherwise, the autosummary op will not get executed, and the average -# value will not get accumulated. -# - It is perfectly fine to include autosummaries with the same name in -# several places throughout the graph, even if they are executed concurrently. -# - It is ok to also pass in a python scalar or numpy array. In this case, it -# is added to the average immediately. - -_autosummary_vars = OrderedDict() # name => [var, ...] -_autosummary_immediate = OrderedDict() # name => update_op, update_value -_autosummary_finalized = False - -def autosummary(name, value): - id = name.replace('/', '_') - if is_tf_expression(value): - with tf.name_scope('summary_' + id), tf.device(value.device): - update_op = _create_autosummary_var(name, value) - with tf.control_dependencies([update_op]): - return tf.identity(value) - else: # python scalar or numpy array - if name not in _autosummary_immediate: - with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None): - update_value = tf.placeholder(tf.float32) - update_op = _create_autosummary_var(name, update_value) - _autosummary_immediate[name] = update_op, update_value - update_op, update_value = _autosummary_immediate[name] - run(update_op, {update_value: np.float32(value)}) - return value - -# Create the necessary ops to include autosummaries in TensorBoard report. -# Note: This should be done only once per graph. -def finalize_autosummaries(): - global _autosummary_finalized - if _autosummary_finalized: - return - _autosummary_finalized = True - init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars]) - with tf.device(None), tf.control_dependencies(None): - for name, vars in _autosummary_vars.items(): - id = name.replace('/', '_') - with absolute_name_scope('Autosummary/' + id): - sum = tf.add_n(vars) - avg = sum[0] / sum[1] - with tf.control_dependencies([avg]): # read before resetting - reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars] - with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting - tf.summary.scalar(name, avg) - -# Internal helper for creating autosummary accumulators. 
-def _create_autosummary_var(name, value_expr): - assert not _autosummary_finalized - v = tf.cast(value_expr, tf.float32) - if v.shape.ndims is 0: - v = [v, np.float32(1.0)] - elif v.shape.ndims is 1: - v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)] - else: - v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))] - v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2)) - with tf.control_dependencies(None): - var = tf.Variable(tf.zeros(2)) # [numerator, denominator] - update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v)) - if name in _autosummary_vars: - _autosummary_vars[name].append(var) - else: - _autosummary_vars[name] = [var] - return update_op - -#---------------------------------------------------------------------------- -# Call filewriter.add_summary() with all summaries in the default graph, -# automatically finalizing and merging them on the first call. - -_summary_merge_op = None - -def save_summaries(filewriter, global_step=None): - global _summary_merge_op - if _summary_merge_op is None: - finalize_autosummaries() - with tf.device(None), tf.control_dependencies(None): - _summary_merge_op = tf.summary.merge_all() - filewriter.add_summary(_summary_merge_op.eval(), global_step) - -#---------------------------------------------------------------------------- -# Utilities for importing modules and objects by name. - -def import_module(module_or_obj_name): - parts = module_or_obj_name.split('.') - parts[0] = {'np': 'numpy', 'tf': 'tensorflow'}.get(parts[0], parts[0]) - for i in range(len(parts), 0, -1): - try: - module = importlib.import_module('.'.join(parts[:i])) - relative_obj_name = '.'.join(parts[i:]) - return module, relative_obj_name - except ImportError: - pass - raise ImportError(module_or_obj_name) - -def find_obj_in_module(module, relative_obj_name): - obj = module - for part in relative_obj_name.split('.'): - obj = getattr(obj, part) - return obj - -def import_obj(obj_name): - module, relative_obj_name = import_module(obj_name) - return find_obj_in_module(module, relative_obj_name) - -def call_func_by_name(*args, func=None, **kwargs): - assert func is not None - return import_obj(func)(*args, **kwargs) - -#---------------------------------------------------------------------------- -# Wrapper for tf.train.Optimizer that automatically takes care of: -# - Gradient averaging for multi-GPU training. -# - Dynamic loss scaling and typecasts for FP16 training. -# - Ignoring corrupted gradients that contain NaNs/Infs. -# - Reporting statistics. -# - Well-chosen default settings. - -class Optimizer: - def __init__( - self, - name = 'Train', - tf_optimizer = 'tf.train.AdamOptimizer', - learning_rate = 0.001, - use_loss_scaling = False, - loss_scaling_init = 64.0, - loss_scaling_inc = 0.0005, - loss_scaling_dec = 1.0, - **kwargs): - - # Init fields. - self.name = name - self.learning_rate = tf.convert_to_tensor(learning_rate) - self.id = self.name.replace('/', '.') - self.scope = tf.get_default_graph().unique_name(self.id) - self.optimizer_class = import_obj(tf_optimizer) - self.optimizer_kwargs = dict(kwargs) - self.use_loss_scaling = use_loss_scaling - self.loss_scaling_init = loss_scaling_init - self.loss_scaling_inc = loss_scaling_inc - self.loss_scaling_dec = loss_scaling_dec - self._grad_shapes = None # [shape, ...] - self._dev_opt = OrderedDict() # device => optimizer - self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...] 
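To make the rest of the `Optimizer` definition easier to follow, the dynamic loss-scaling policy it implements for FP16 training works like this: the loss is multiplied by `2 ** log2_scale` before backprop, gradients are divided back by the same factor, and the exponent is nudged up after every clean step and dropped sharply whenever a NaN/Inf gradient forces the update to be skipped. A framework-free sketch of just that policy follows; the constants mirror the defaults shown above, everything else is illustrative.

```python
# Framework-free sketch of the dynamic loss-scaling policy (illustrative names;
# constants mirror loss_scaling_init / loss_scaling_inc / loss_scaling_dec above).
log2_scale = 64.0        # loss and gradients are scaled by 2 ** log2_scale
inc, dec = 0.0005, 1.0

def on_step(grads_finite):
    """Advance the loss-scaling state after one training step."""
    global log2_scale
    if grads_finite:
        log2_scale += inc          # clean step: apply the update, grow the scale slowly
        return "applied update"
    log2_scale -= dec              # overflow: skip the update, shrink the scale fast
    return "skipped update"

for finite in [True, True, False, True]:
    print(on_step(finite), "log2(scale) =", round(log2_scale, 4))
```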
- self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor) - self._updates_applied = False - - # Register the gradients of the given loss function with respect to the given variables. - # Intended to be called once per GPU. - def register_gradients(self, loss, vars): - assert not self._updates_applied - - # Validate arguments. - if isinstance(vars, dict): - vars = list(vars.values()) # allow passing in Network.trainables as vars - assert isinstance(vars, list) and len(vars) >= 1 - assert all(is_tf_expression(expr) for expr in vars + [loss]) - if self._grad_shapes is None: - self._grad_shapes = [shape_to_list(var.shape) for var in vars] - assert len(vars) == len(self._grad_shapes) - assert all(shape_to_list(var.shape) == var_shape for var, var_shape in zip(vars, self._grad_shapes)) - dev = loss.device - assert all(var.device == dev for var in vars) - - # Register device and compute gradients. - with tf.name_scope(self.id + '_grad'), tf.device(dev): - if dev not in self._dev_opt: - opt_name = self.scope.replace('/', '_') + '_opt%d' % len(self._dev_opt) - self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) - self._dev_grads[dev] = [] - loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) - grads = self._dev_opt[dev].compute_gradients(loss, vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage - grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros - self._dev_grads[dev].append(grads) - - # Construct training op to update the registered variables based on their gradients. - def apply_updates(self): - assert not self._updates_applied - self._updates_applied = True - devices = list(self._dev_grads.keys()) - total_grads = sum(len(grads) for grads in self._dev_grads.values()) - assert len(devices) >= 1 and total_grads >= 1 - ops = [] - with absolute_name_scope(self.scope): - - # Cast gradients to FP32 and calculate partial sum within each device. - dev_grads = OrderedDict() # device => [(grad, var), ...] - for dev_idx, dev in enumerate(devices): - with tf.name_scope('ProcessGrads%d' % dev_idx), tf.device(dev): - sums = [] - for gv in zip(*self._dev_grads[dev]): - assert all(v is gv[0][1] for g, v in gv) - g = [tf.cast(g, tf.float32) for g, v in gv] - g = g[0] if len(g) == 1 else tf.add_n(g) - sums.append((g, gv[0][1])) - dev_grads[dev] = sums - - # Sum gradients across devices. - if len(devices) > 1: - with tf.name_scope('SumAcrossGPUs'), tf.device(None): - for var_idx, grad_shape in enumerate(self._grad_shapes): - g = [dev_grads[dev][var_idx][0] for dev in devices] - if np.prod(grad_shape): # nccl does not support zero-sized tensors - g = tf.contrib.nccl.all_sum(g) - for dev, gg in zip(devices, g): - dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1]) - - # Apply updates separately on each device. - for dev_idx, (dev, grads) in enumerate(dev_grads.items()): - with tf.name_scope('ApplyGrads%d' % dev_idx), tf.device(dev): - - # Scale gradients as needed. - if self.use_loss_scaling or total_grads > 1: - with tf.name_scope('Scale'): - coef = tf.constant(np.float32(1.0 / total_grads), name='coef') - coef = self.undo_loss_scaling(coef) - grads = [(g * coef, v) for g, v in grads] - - # Check for overflows. - with tf.name_scope('CheckOverflow'): - grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads])) - - # Update weights and adjust loss scaling. 
- with tf.name_scope('UpdateWeights'): - opt = self._dev_opt[dev] - ls_var = self.get_loss_scaling_var(dev) - if not self.use_loss_scaling: - ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op)) - else: - ops.append(tf.cond(grad_ok, - lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)), - lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec)))) - - # Report statistics on the last device. - if dev == devices[-1]: - with tf.name_scope('Statistics'): - ops.append(autosummary(self.id + '/learning_rate', self.learning_rate)) - ops.append(autosummary(self.id + '/overflow_frequency', tf.where(grad_ok, 0, 1))) - if self.use_loss_scaling: - ops.append(autosummary(self.id + '/loss_scaling_log2', ls_var)) - - # Initialize variables and group everything into a single op. - self.reset_optimizer_state() - init_uninited_vars(list(self._dev_ls_var.values())) - return tf.group(*ops, name='TrainingOp') - - # Reset internal state of the underlying optimizer. - def reset_optimizer_state(self): - run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()]) - - # Get or create variable representing log2 of the current dynamic loss scaling factor. - def get_loss_scaling_var(self, device): - if not self.use_loss_scaling: - return None - if device not in self._dev_ls_var: - with absolute_name_scope(self.scope + '/LossScalingVars'), tf.control_dependencies(None): - self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name='loss_scaling_var') - return self._dev_ls_var[device] - - # Apply dynamic loss scaling for the given expression. - def apply_loss_scaling(self, value): - assert is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * exp2(self.get_loss_scaling_var(value.device)) - - # Undo the effect of dynamic loss scaling for the given expression. - def undo_loss_scaling(self, value): - assert is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * exp2(-self.get_loss_scaling_var(value.device)) - -#---------------------------------------------------------------------------- -# Generic network abstraction. -# -# Acts as a convenience wrapper for a parameterized network construction -# function, providing several utility methods and convenient access to -# the inputs/outputs/weights. -# -# Network objects can be safely pickled and unpickled for long-term -# archival purposes. The pickling works reliably as long as the underlying -# network construction function is defined in a standalone Python module -# that has no side effects or application-specific imports. - -network_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import. -_network_import_modules = [] # Temporary modules create during pickle import. - -class Network: - def __init__(self, - name=None, # Network name. Used to select TensorFlow name and variable scopes. - func=None, # Fully qualified name of the underlying network construction function. - **static_kwargs): # Keyword arguments to be passed in to the network construction function. - - self._init_fields() - self.name = name - self.static_kwargs = dict(static_kwargs) - - # Init build func. - module, self._build_func_name = import_module(func) - self._build_module_src = inspect.getsource(module) - self._build_func = find_obj_in_module(module, self._build_func_name) - - # Init graph. 
- self._init_graph() - self.reset_vars() - - def _init_fields(self): - self.name = None # User-specified name, defaults to build func name if None. - self.scope = None # Unique TF graph scope, derived from the user-specified name. - self.static_kwargs = dict() # Arguments passed to the user-supplied build func. - self.num_inputs = 0 # Number of input tensors. - self.num_outputs = 0 # Number of output tensors. - self.input_shapes = [[]] # Input tensor shapes (NC or NCHW), including minibatch dimension. - self.output_shapes = [[]] # Output tensor shapes (NC or NCHW), including minibatch dimension. - self.input_shape = [] # Short-hand for input_shapes[0]. - self.output_shape = [] # Short-hand for output_shapes[0]. - self.input_templates = [] # Input placeholders in the template graph. - self.output_templates = [] # Output tensors in the template graph. - self.input_names = [] # Name string for each input. - self.output_names = [] # Name string for each output. - self.vars = OrderedDict() # All variables (localname => var). - self.trainables = OrderedDict() # Trainable variables (localname => var). - self._build_func = None # User-supplied build function that constructs the network. - self._build_func_name = None # Name of the build function. - self._build_module_src = None # Full source code of the module containing the build function. - self._run_cache = dict() # Cached graph data for Network.run(). - - def _init_graph(self): - # Collect inputs. - self.input_names = [] - for param in inspect.signature(self._build_func).parameters.values(): - if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty: - self.input_names.append(param.name) - self.num_inputs = len(self.input_names) - assert self.num_inputs >= 1 - - # Choose name and scope. - if self.name is None: - self.name = self._build_func_name - self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False) - - # Build template graph. - with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE): - assert tf.get_variable_scope().name == self.scope - with absolute_name_scope(self.scope): # ignore surrounding name_scope - with tf.control_dependencies(None): # ignore surrounding control_dependencies - self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names] - out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs) - - # Collect outputs. - assert is_tf_expression(out_expr) or isinstance(out_expr, tuple) - self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr) - self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates] - self.num_outputs = len(self.output_templates) - assert self.num_outputs >= 1 - - # Populate remaining fields. - self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates] - self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates] - self.input_shape = self.input_shapes[0] - self.output_shape = self.output_shapes[0] - self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')]) - self.trainables = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')]) - - # Run initializers for all variables defined by this network. - def reset_vars(self): - run([var.initializer for var in self.vars.values()]) - - # Run initializers for all trainable variables defined by this network. 
- def reset_trainables(self): - run([var.initializer for var in self.trainables.values()]) - - # Get TensorFlow expression(s) for the output(s) of this network, given the inputs. - def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs): - assert len(in_expr) == self.num_inputs - all_kwargs = dict(self.static_kwargs) - all_kwargs.update(dynamic_kwargs) - with tf.variable_scope(self.scope, reuse=True): - assert tf.get_variable_scope().name == self.scope - named_inputs = [tf.identity(expr, name=name) for expr, name in zip(in_expr, self.input_names)] - out_expr = self._build_func(*named_inputs, **all_kwargs) - assert is_tf_expression(out_expr) or isinstance(out_expr, tuple) - if return_as_list: - out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr) - return out_expr - - # Get the local name of a given variable, excluding any surrounding name scopes. - def get_var_localname(self, var_or_globalname): - assert is_tf_expression(var_or_globalname) or isinstance(var_or_globalname, str) - globalname = var_or_globalname if isinstance(var_or_globalname, str) else var_or_globalname.name - assert globalname.startswith(self.scope + '/') - localname = globalname[len(self.scope) + 1:] - localname = localname.split(':')[0] - return localname - - # Find variable by local or global name. - def find_var(self, var_or_localname): - assert is_tf_expression(var_or_localname) or isinstance(var_or_localname, str) - return self.vars[var_or_localname] if isinstance(var_or_localname, str) else var_or_localname - - # Get the value of a given variable as NumPy array. - # Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible. - def get_var(self, var_or_localname): - return self.find_var(var_or_localname).eval() - - # Set the value of a given variable based on the given NumPy array. - # Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible. - def set_var(self, var_or_localname, new_value): - return set_vars({self.find_var(var_or_localname): new_value}) - - # Pickle export. - def __getstate__(self): - return { - 'version': 2, - 'name': self.name, - 'static_kwargs': self.static_kwargs, - 'build_module_src': self._build_module_src, - 'build_func_name': self._build_func_name, - 'variables': list(zip(self.vars.keys(), run(list(self.vars.values()))))} - - # Pickle import. - def __setstate__(self, state): - self._init_fields() - - # Execute custom import handlers. - for handler in network_import_handlers: - state = handler(state) - - # Set basic fields. - assert state['version'] == 2 - self.name = state['name'] - self.static_kwargs = state['static_kwargs'] - self._build_module_src = state['build_module_src'] - self._build_func_name = state['build_func_name'] - - # Parse imported module. - module = imp.new_module('_tfutil_network_import_module_%d' % len(_network_import_modules)) - exec(self._build_module_src, module.__dict__) - self._build_func = find_obj_in_module(module, self._build_func_name) - _network_import_modules.append(module) # avoid gc - - # Init graph. - self._init_graph() - self.reset_vars() - set_vars({self.find_var(name): value for name, value in state['variables']}) - - # Create a clone of this network with its own copy of the variables. 
- def clone(self, name=None): - net = object.__new__(Network) - net._init_fields() - net.name = name if name is not None else self.name - net.static_kwargs = dict(self.static_kwargs) - net._build_module_src = self._build_module_src - net._build_func_name = self._build_func_name - net._build_func = self._build_func - net._init_graph() - net.copy_vars_from(self) - return net - - # Copy the values of all variables from the given network. - def copy_vars_from(self, src_net): - assert isinstance(src_net, Network) - name_to_value = run({name: src_net.find_var(name) for name in self.vars.keys()}) - set_vars({self.find_var(name): value for name, value in name_to_value.items()}) - - # Copy the values of all trainable variables from the given network. - def copy_trainables_from(self, src_net): - assert isinstance(src_net, Network) - name_to_value = run({name: src_net.find_var(name) for name in self.trainables.keys()}) - set_vars({self.find_var(name): value for name, value in name_to_value.items()}) - - # Create new network with the given parameters, and copy all variables from this network. - def convert(self, name=None, func=None, **static_kwargs): - net = Network(name, func, **static_kwargs) - net.copy_vars_from(self) - return net - - # Construct a TensorFlow op that updates the variables of this network - # to be slightly closer to those of the given network. - def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0): - assert isinstance(src_net, Network) - with absolute_name_scope(self.scope): - with tf.name_scope('MovingAvg'): - ops = [] - for name, var in self.vars.items(): - if name in src_net.vars: - cur_beta = beta if name in self.trainables else beta_nontrainable - new_value = lerp(src_net.vars[name], var, cur_beta) - ops.append(var.assign(new_value)) - return tf.group(*ops) - - # Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s). - def run(self, *in_arrays, - return_as_list = False, # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs. - print_progress = False, # Print progress to the console? Useful for very large input arrays. - minibatch_size = None, # Maximum minibatch size to use, None = disable batching. - num_gpus = 1, # Number of GPUs to use. - out_mul = 1.0, # Multiplicative constant to apply to the output(s). - out_add = 0.0, # Additive constant to apply to the output(s). - out_shrink = 1, # Shrink the spatial dimensions of the output(s) by the given factor. - out_dtype = None, # Convert the output to the specified data type. - **dynamic_kwargs): # Additional keyword arguments to pass into the network construction function. - - assert len(in_arrays) == self.num_inputs - num_items = in_arrays[0].shape[0] - if minibatch_size is None: - minibatch_size = num_items - key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype]) - - # Build graph. 
- if key not in self._run_cache: - with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None): - in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates])) - out_split = [] - for gpu in range(num_gpus): - with tf.device('/gpu:%d' % gpu): - out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs) - if out_mul != 1.0: - out_expr = [x * out_mul for x in out_expr] - if out_add != 0.0: - out_expr = [x + out_add for x in out_expr] - if out_shrink > 1: - ksize = [1, 1, out_shrink, out_shrink] - out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr] - if out_dtype is not None: - if tf.as_dtype(out_dtype).is_integer: - out_expr = [tf.round(x) for x in out_expr] - out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr] - out_split.append(out_expr) - self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)] - - # Run minibatches. - out_expr = self._run_cache[key] - out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr] - for mb_begin in range(0, num_items, minibatch_size): - if print_progress: - print('\r%d / %d' % (mb_begin, num_items), end='') - mb_end = min(mb_begin + minibatch_size, num_items) - mb_in = [src[mb_begin : mb_end] for src in in_arrays] - mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in))) - for dst, src in zip(out_arrays, mb_out): - dst[mb_begin : mb_end] = src - - # Done. - if print_progress: - print('\r%d / %d' % (num_items, num_items)) - if not return_as_list: - out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays) - return out_arrays - - # Returns a list of (name, output_expr, trainable_vars) tuples corresponding to - # individual layers of the network. Mainly intended to be used for reporting. - def list_layers(self): - patterns_to_ignore = ['/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast', '/concat'] - all_ops = tf.get_default_graph().get_operations() - all_ops = [op for op in all_ops if not any(p in op.name for p in patterns_to_ignore)] - layers = [] - - def recurse(scope, parent_ops, level): - prefix = scope + '/' - ops = [op for op in parent_ops if op.name == scope or op.name.startswith(prefix)] - - # Does not contain leaf nodes => expand immediate children. - if level == 0 or all('/' in op.name[len(prefix):] for op in ops): - visited = set() - for op in ops: - suffix = op.name[len(prefix):] - if '/' in suffix: - suffix = suffix[:suffix.index('/')] - if suffix not in visited: - recurse(prefix + suffix, ops, level + 1) - visited.add(suffix) - - # Otherwise => interpret as a layer. - else: - layer_name = scope[len(self.scope)+1:] - layer_output = ops[-1].outputs[0] - layer_trainables = [op.outputs[0] for op in ops if op.type.startswith('Variable') and self.get_var_localname(op.name) in self.trainables] - layers.append((layer_name, layer_output, layer_trainables)) - - recurse(self.scope, all_ops, 0) - return layers - - # Print a summary table of the network structure. 
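Taken together, clone, setup_as_moving_average_of and run support the usual pattern of keeping a smoothed copy of a generator and sampling from it with NumPy inputs. The sketch below is illustrative only: it assumes a Network G with two inputs (latents and labels, unconditional here), an active default session, and the module-level run() helper and NumPy import from this file.

Gs = G.clone('Gs')                                            # independent copy of all variables
Gs_update_op = Gs.setup_as_moving_average_of(G, beta=0.999)

# After every optimizer step during training:
run(Gs_update_op)                                             # nudge Gs towards the current G

# Sampling: NumPy in, NumPy out (uint8 images in [0, 255] with the constants below).
latents = np.random.randn(8, 512).astype(np.float32)          # 512-dim latent space assumed
labels = np.zeros((8, 0), dtype=np.float32)                   # unconditional network assumed
images = Gs.run(latents, labels, minibatch_size=4, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)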
- def print_layers(self, title=None, hide_layers_with_no_params=False): - if title is None: title = self.name - print() - print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape')) - print('%-28s%-12s%-24s%-24s' % (('---',) * 4)) - - total_params = 0 - for layer_name, layer_output, layer_trainables in self.list_layers(): - weights = [var for var in layer_trainables if var.name.endswith('/weight:0')] - num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables) - total_params += num_params - if hide_layers_with_no_params and num_params == 0: - continue - - print('%-28s%-12s%-24s%-24s' % ( - layer_name, - num_params if num_params else '-', - layer_output.shape, - weights[0].shape if len(weights) == 1 else '-')) - - print('%-28s%-12s%-24s%-24s' % (('---',) * 4)) - print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', '')) - print() - - # Construct summary ops to include histograms of all trainable parameters in TensorBoard. - def setup_weight_histograms(self, title=None): - if title is None: title = self.name - with tf.name_scope(None), tf.device(None), tf.control_dependencies(None): - for localname, var in self.trainables.items(): - if '/' in localname: - p = localname.split('/') - name = title + '_' + p[-1] + '/' + '_'.join(p[:-1]) - else: - name = title + '_toplevel/' + localname - tf.summary.histogram(name, var) - -#---------------------------------------------------------------------------- diff --git a/spaces/yeqingmei123/face-test/e4e/editings/latent_editor.py b/spaces/yeqingmei123/face-test/e4e/editings/latent_editor.py deleted file mode 100644 index 4bebca2f5c86f71b58fa1f30d24bfcb0da06d88f..0000000000000000000000000000000000000000 --- a/spaces/yeqingmei123/face-test/e4e/editings/latent_editor.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import sys -sys.path.append(".") -sys.path.append("..") -from editings import ganspace, sefa -from utils.common import tensor2im - - -class LatentEditor(object): - def __init__(self, stylegan_generator, is_cars=False): - self.generator = stylegan_generator - self.is_cars = is_cars # Since the cars StyleGAN output is 384x512, there is a need to crop the 512x512 output. - - def apply_ganspace(self, latent, ganspace_pca, edit_directions): - edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions) - return self._latents_to_image(edit_latents) - - def apply_interfacegan(self, latent, direction, factor=1, factor_range=None): - edit_latents = [] - if factor_range is not None: # Apply a range of editing factors. for example, (-5, 5) - for f in range(*factor_range): - edit_latent = latent + f * direction - edit_latents.append(edit_latent) - edit_latents = torch.cat(edit_latents) - else: - edit_latents = latent + factor * direction - return self._latents_to_image(edit_latents) - - def apply_sefa(self, latent, indices=[2, 3, 4, 5], **kwargs): - edit_latents = sefa.edit(self.generator, latent, indices, **kwargs) - return self._latents_to_image(edit_latents) - - # Currently, in order to apply StyleFlow editings, one should run inference, - # save the latent codes and load them form the official StyleFlow repository. 
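As a usage note for the editor defined above, apply_interfacegan accepts either a single editing factor or a factor_range that sweeps several factors and concatenates the resulting images horizontally. A minimal illustrative sketch; the pretrained generator, the latent code and the direction tensor are assumed to be obtained elsewhere, and the file path is hypothetical:

editor = LatentEditor(stylegan_generator)
age_direction = torch.load('editings/age_direction.pt')       # hypothetical path, not from this repo
single_image = editor.apply_interfacegan(latent, age_direction, factor=3)
sweep_image = editor.apply_interfacegan(latent, age_direction, factor_range=(-5, 5))   # 10 edits side by side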
- # def apply_styleflow(self): - # pass - - def _latents_to_image(self, latents): - with torch.no_grad(): - images, _ = self.generator([latents], randomize_noise=False, input_is_latent=True) - if self.is_cars: - images = images[:, :, 64:448, :] # 512x512 -> 384x512 - horizontal_concat_image = torch.cat(list(images), 2) - final_image = tensor2im(horizontal_concat_image) - return final_image diff --git a/spaces/yeqingmei123/face-test/e4e/models/latent_codes_pool.py b/spaces/yeqingmei123/face-test/e4e/models/latent_codes_pool.py deleted file mode 100644 index 0281d4b5e80f8eb26e824fa35b4f908dcb6634e6..0000000000000000000000000000000000000000 --- a/spaces/yeqingmei123/face-test/e4e/models/latent_codes_pool.py +++ /dev/null @@ -1,55 +0,0 @@ -import random -import torch - - -class LatentCodesPool: - """This class implements latent codes buffer that stores previously generated w latent codes. - This buffer enables us to update discriminators using a history of generated w's - rather than the ones produced by the latest encoder. - """ - - def __init__(self, pool_size): - """Initialize the ImagePool class - Parameters: - pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created - """ - self.pool_size = pool_size - if self.pool_size > 0: # create an empty pool - self.num_ws = 0 - self.ws = [] - - def query(self, ws): - """Return w's from the pool. - Parameters: - ws: the latest generated w's from the generator - Returns w's from the buffer. - By 50/100, the buffer will return input w's. - By 50/100, the buffer will return w's previously stored in the buffer, - and insert the current w's to the buffer. - """ - if self.pool_size == 0: # if the buffer size is 0, do nothing - return ws - return_ws = [] - for w in ws: # ws.shape: (batch, 512) or (batch, n_latent, 512) - # w = torch.unsqueeze(image.data, 0) - if w.ndim == 2: - i = random.randint(0, len(w) - 1) # apply a random latent index as a candidate - w = w[i] - self.handle_w(w, return_ws) - return_ws = torch.stack(return_ws, 0) # collect all the images and return - return return_ws - - def handle_w(self, w, return_ws): - if self.num_ws < self.pool_size: # if the buffer is not full; keep inserting current codes to the buffer - self.num_ws = self.num_ws + 1 - self.ws.append(w) - return_ws.append(w) - else: - p = random.uniform(0, 1) - if p > 0.5: # by 50% chance, the buffer will return a previously stored latent code, and insert the current code into the buffer - random_id = random.randint(0, self.pool_size - 1) # randint is inclusive - tmp = self.ws[random_id].clone() - self.ws[random_id] = w - return_ws.append(tmp) - else: # by another 50% chance, the buffer will return the current image - return_ws.append(w) diff --git a/spaces/ygangang/CodeFormer/CodeFormer/scripts/download_pretrained_models_from_gdrive.py b/spaces/ygangang/CodeFormer/CodeFormer/scripts/download_pretrained_models_from_gdrive.py deleted file mode 100644 index 7df5be6fc260394ee9bbd0a7ae377e2ca657fe83..0000000000000000000000000000000000000000 --- a/spaces/ygangang/CodeFormer/CodeFormer/scripts/download_pretrained_models_from_gdrive.py +++ /dev/null @@ -1,60 +0,0 @@ -import argparse -import os -from os import path as osp - -# from basicsr.utils.download_util import download_file_from_google_drive -import gdown - - -def download_pretrained_models(method, file_ids): - save_path_root = f'./weights/{method}' - os.makedirs(save_path_root, exist_ok=True) - - for file_name, file_id in file_ids.items(): - file_url = 'https://drive.google.com/uc?id='+file_id - 
save_path = osp.abspath(osp.join(save_path_root, file_name)) - if osp.exists(save_path): - user_response = input(f'{file_name} already exist. Do you want to cover it? Y/N\n') - if user_response.lower() == 'y': - print(f'Covering {file_name} to {save_path}') - gdown.download(file_url, save_path, quiet=False) - # download_file_from_google_drive(file_id, save_path) - elif user_response.lower() == 'n': - print(f'Skipping {file_name}') - else: - raise ValueError('Wrong input. Only accepts Y/N.') - else: - print(f'Downloading {file_name} to {save_path}') - gdown.download(file_url, save_path, quiet=False) - # download_file_from_google_drive(file_id, save_path) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - - parser.add_argument( - 'method', - type=str, - help=("Options: 'CodeFormer' 'facelib'. Set to 'all' to download all the models.")) - args = parser.parse_args() - - # file name: file id - # 'dlib': { - # 'mmod_human_face_detector-4cb19393.dat': '1qD-OqY8M6j4PWUP_FtqfwUPFPRMu6ubX', - # 'shape_predictor_5_face_landmarks-c4b1e980.dat': '1vF3WBUApw4662v9Pw6wke3uk1qxnmLdg', - # 'shape_predictor_68_face_landmarks-fbdc2cb8.dat': '1tJyIVdCHaU6IDMDx86BZCxLGZfsWB8yq' - # } - file_ids = { - 'CodeFormer': { - 'codeformer.pth': '1v_E_vZvP-dQPF55Kc5SRCjaKTQXDz-JB' - }, - 'facelib': { - 'yolov5l-face.pth': '131578zMA6B2x8VQHyHfa6GEPtulMCNzV', - 'parsing_parsenet.pth': '16pkohyZZ8ViHGBk3QtVqxLZKzdo466bK' - } - } - - if args.method == 'all': - for method in file_ids.keys(): - download_pretrained_models(method, file_ids[method]) - else: - download_pretrained_models(args.method, file_ids[args.method]) \ No newline at end of file diff --git a/spaces/ygangang/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md b/spaces/ygangang/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md deleted file mode 100644 index 9af54dca9f1956d33877bf7df09b34c2d6ddeeaf..0000000000000000000000000000000000000000 --- a/spaces/ygangang/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Animation Using Thin Plate Spline Motion Model -emoji: 👁 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false -duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/fnet/modeling_fnet.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/fnet/modeling_fnet.py deleted file mode 100644 index 45042147761d5699f47b7d7e1a0a1ad9e445aa16..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/fnet/modeling_fnet.py +++ /dev/null @@ -1,1196 +0,0 @@ -# coding=utf-8 -# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
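Returning briefly to the download helper above: the script takes a single positional method argument, and calling the function directly is equivalent. An illustrative sketch using the CodeFormer entry already listed in the script:

download_pretrained_models('CodeFormer', {'codeformer.pth': '1v_E_vZvP-dQPF55Kc5SRCjaKTQXDz-JB'})
# Same effect as running: python scripts/download_pretrained_models_from_gdrive.py CodeFormer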
-""" PyTorch FNet model.""" - -import warnings -from dataclasses import dataclass -from functools import partial -from typing import Optional, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss - -from ...utils import is_scipy_available - - -if is_scipy_available(): - from scipy import linalg - -from ...activations import ACT2FN -from ...modeling_outputs import ( - BaseModelOutput, - BaseModelOutputWithPooling, - MaskedLMOutput, - ModelOutput, - MultipleChoiceModelOutput, - NextSentencePredictorOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, -) -from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward -from ...utils import ( - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from .configuration_fnet import FNetConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "google/fnet-base" -_CONFIG_FOR_DOC = "FNetConfig" - -FNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "google/fnet-base", - "google/fnet-large" - # See all FNet models at https://huggingface.co/models?filter=fnet -] - - -# Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py -def _two_dim_matmul(x, matrix_dim_one, matrix_dim_two): - """Applies 2D matrix multiplication to 3D input arrays.""" - seq_length = x.shape[1] - matrix_dim_one = matrix_dim_one[:seq_length, :seq_length] - x = x.type(torch.complex64) - return torch.einsum("bij,jk,ni->bnk", x, matrix_dim_two, matrix_dim_one) - - -# # Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py -def two_dim_matmul(x, matrix_dim_one, matrix_dim_two): - return _two_dim_matmul(x, matrix_dim_one, matrix_dim_two) - - -# Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py -def fftn(x): - """ - Applies n-dimensional Fast Fourier Transform (FFT) to input array. - - Args: - x: Input n-dimensional array. - - Returns: - n-dimensional Fourier transform of input n-dimensional array. - """ - out = x - for axis in reversed(range(x.ndim)[1:]): # We don't need to apply FFT to last axis - out = torch.fft.fft(out, axis=axis) - return out - - -class FNetEmbeddings(nn.Module): - """Construct the embeddings from word, position and token_type embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - # NOTE: This is the project layer and will be needed. The original code allows for different embedding and different model dimensions. 
- self.projection = nn.Linear(config.hidden_size, config.hidden_size) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) - - self.register_buffer( - "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False - ) - - def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - if position_ids is None: - position_ids = self.position_ids[:, :seq_length] - - # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs - # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves - # issue #5664 - if token_type_ids is None: - if hasattr(self, "token_type_ids"): - buffered_token_type_ids = self.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - token_type_embeddings = self.token_type_embeddings(token_type_ids) - - embeddings = inputs_embeds + token_type_embeddings - - position_embeddings = self.position_embeddings(position_ids) - embeddings += position_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.projection(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class FNetBasicFourierTransform(nn.Module): - def __init__(self, config): - super().__init__() - self._init_fourier_transform(config) - - def _init_fourier_transform(self, config): - if not config.use_tpu_fourier_optimizations: - self.fourier_transform = partial(torch.fft.fftn, dim=(1, 2)) - elif config.max_position_embeddings <= 4096: - if is_scipy_available(): - self.register_buffer( - "dft_mat_hidden", torch.tensor(linalg.dft(config.hidden_size), dtype=torch.complex64) - ) - self.register_buffer( - "dft_mat_seq", torch.tensor(linalg.dft(config.tpu_short_seq_length), dtype=torch.complex64) - ) - self.fourier_transform = partial( - two_dim_matmul, matrix_dim_one=self.dft_mat_seq, matrix_dim_two=self.dft_mat_hidden - ) - else: - logging.warning( - "SciPy is needed for DFT matrix calculation and is not found. Using TPU optimized fast fourier" - " transform instead." - ) - self.fourier_transform = fftn - else: - self.fourier_transform = fftn - - def forward(self, hidden_states): - # NOTE: We do not use torch.vmap as it is not integrated into PyTorch stable versions. - # Interested users can modify the code to use vmap from the nightly versions, getting the vmap from here: - # https://pytorch.org/docs/master/generated/torch.vmap.html. Note that fourier transform methods will need - # change accordingly. 
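As an aside, the non-TPU code path above reduces to one n-dimensional FFT over the sequence and hidden dimensions, of which only the real part is kept. A tiny self-contained check with arbitrary shapes:

import torch

hidden_states = torch.randn(2, 8, 16)                     # (batch, seq_len, hidden_size)
mixed = torch.fft.fftn(hidden_states, dim=(1, 2)).real    # same operation as partial(torch.fft.fftn, dim=(1, 2))
assert mixed.shape == hidden_states.shape                 # token mixing preserves the tensor shape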
- - outputs = self.fourier_transform(hidden_states).real - return (outputs,) - - -class FNetBasicOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.LayerNorm(input_tensor + hidden_states) - return hidden_states - - -class FNetFourierTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.self = FNetBasicFourierTransform(config) - self.output = FNetBasicOutput(config) - - def forward(self, hidden_states): - self_outputs = self.self(hidden_states) - fourier_output = self.output(self_outputs[0], hidden_states) - outputs = (fourier_output,) - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->FNet -class FNetIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->FNet -class FNetOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class FNetLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 # The dimension which has the sequence length - self.fourier = FNetFourierTransform(config) - self.intermediate = FNetIntermediate(config) - self.output = FNetOutput(config) - - def forward(self, hidden_states): - self_fourier_outputs = self.fourier(hidden_states) - fourier_output = self_fourier_outputs[0] - - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, fourier_output - ) - - outputs = (layer_output,) - - return outputs - - def feed_forward_chunk(self, fourier_output): - intermediate_output = self.intermediate(fourier_output) - layer_output = self.output(intermediate_output, fourier_output) - return layer_output - - -class FNetEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([FNetLayer(config) for _ in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward(self, hidden_states, output_hidden_states=False, return_dict=True): - all_hidden_states = () if output_hidden_states else None - - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - 
layer_outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(layer_module), hidden_states) - else: - layer_outputs = layer_module(hidden_states) - - hidden_states = layer_outputs[0] - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) - - return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states) - - -# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->FNet -class FNetPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->FNet -class FNetPredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -class FNetLMPredictionHead(nn.Module): - def __init__(self, config): - super().__init__() - self.transform = FNetPredictionHeadTransform(config) - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. 
- self.decoder = nn.Linear(config.hidden_size, config.vocab_size) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - self.decoder.bias = self.bias - - def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - - def _tie_weights(self): - # To tie those two weights if they get disconnected (on TPU or when the bias is resized) - self.bias = self.decoder.bias - - -class FNetOnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = FNetLMPredictionHead(config) - - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores - - -# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->FNet -class FNetOnlyNSPHead(nn.Module): - def __init__(self, config): - super().__init__() - self.seq_relationship = nn.Linear(config.hidden_size, 2) - - def forward(self, pooled_output): - seq_relationship_score = self.seq_relationship(pooled_output) - return seq_relationship_score - - -# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->FNet -class FNetPreTrainingHeads(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = FNetLMPredictionHead(config) - self.seq_relationship = nn.Linear(config.hidden_size, 2) - - def forward(self, sequence_output, pooled_output): - prediction_scores = self.predictions(sequence_output) - seq_relationship_score = self.seq_relationship(pooled_output) - return prediction_scores, seq_relationship_score - - -class FNetPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = FNetConfig - base_model_prefix = "fnet" - supports_gradient_checkpointing = True - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, nn.Linear): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - # NOTE: Original code uses same initialization as weights for biases as well. - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, FNetEncoder): - module.gradient_checkpointing = value - - -@dataclass -class FNetForPreTrainingOutput(ModelOutput): - """ - Output type of [`FNetForPreTraining`]. - - Args: - loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): - Total loss as the sum of the masked language modeling loss and the next sequence prediction - (classification) loss. - prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). - seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): - Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation - before SoftMax). 
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer - plus the initial embedding outputs. - """ - - loss: Optional[torch.FloatTensor] = None - prediction_logits: torch.FloatTensor = None - seq_relationship_logits: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - - -FNET_START_DOCSTRING = r""" - This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use - it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and - behavior. - - Parameters: - config ([`FNetConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -FNET_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - - inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert *input_ids* indices into associated vectors than the - model's internal embedding lookup matrix. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare FNet Model transformer outputting raw hidden-states without any specific head on top.", - FNET_START_DOCSTRING, -) -class FNetModel(FNetPreTrainedModel): - """ - - The model can behave as an encoder, following the architecture described in [FNet: Mixing Tokens with Fourier - Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. 
- - """ - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - - self.embeddings = FNetEmbeddings(config) - self.encoder = FNetEncoder(config) - - self.pooler = FNetPooler(config) if add_pooling_layer else None - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[tuple, BaseModelOutput]: - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - batch_size, seq_length = input_shape - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - batch_size, seq_length = input_shape - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if ( - self.config.use_tpu_fourier_optimizations - and seq_length <= 4096 - and self.config.tpu_short_seq_length != seq_length - ): - raise ValueError( - "The `tpu_short_seq_length` in FNetConfig should be set equal to the sequence length being passed to" - " the model when using TPU optimizations." - ) - - device = input_ids.device if input_ids is not None else inputs_embeds.device - - if token_type_ids is None: - if hasattr(self.embeddings, "token_type_ids"): - buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - token_type_ids=token_type_ids, - inputs_embeds=inputs_embeds, - ) - encoder_outputs = self.encoder( - embedding_output, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = encoder_outputs[0] - - pooler_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooler_output) + encoder_outputs[1:] - - return BaseModelOutputWithPooling( - last_hidden_state=sequence_output, - pooler_output=pooler_output, - hidden_states=encoder_outputs.hidden_states, - ) - - -@add_start_docstrings( - """ - FNet Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next - sentence prediction (classification)` head. 
- """, - FNET_START_DOCSTRING, -) -class FNetForPreTraining(FNetPreTrainedModel): - _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"] - - def __init__(self, config): - super().__init__(config) - - self.fnet = FNetModel(config) - self.cls = FNetPreTrainingHeads(config) - - # Initialize weights and apply final processing - self.post_init() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @replace_return_docstrings(output_type=FNetForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - next_sentence_label: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, FNetForPreTrainingOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., - config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the - loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` - next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair - (see `input_ids` docstring) Indices should be in `[0, 1]`: - - - 0 indicates sequence B is a continuation of sequence A, - - 1 indicates sequence B is a random sequence. - kwargs (`Dict[str, any]`, optional, defaults to *{}*): - Used to hide legacy arguments that have been deprecated. 
- - Returns: - - Example: - - ```python - >>> from transformers import AutoTokenizer, FNetForPreTraining - >>> import torch - - >>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base") - >>> model = FNetForPreTraining.from_pretrained("google/fnet-base") - >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") - >>> outputs = model(**inputs) - >>> prediction_logits = outputs.prediction_logits - >>> seq_relationship_logits = outputs.seq_relationship_logits - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.fnet( - input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output, pooled_output = outputs[:2] - prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) - - total_loss = None - if labels is not None and next_sentence_label is not None: - loss_fct = CrossEntropyLoss() - masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) - total_loss = masked_lm_loss + next_sentence_loss - - if not return_dict: - output = (prediction_scores, seq_relationship_score) + outputs[2:] - return ((total_loss,) + output) if total_loss is not None else output - - return FNetForPreTrainingOutput( - loss=total_loss, - prediction_logits=prediction_scores, - seq_relationship_logits=seq_relationship_score, - hidden_states=outputs.hidden_states, - ) - - -@add_start_docstrings("""FNet Model with a `language modeling` head on top.""", FNET_START_DOCSTRING) -class FNetForMaskedLM(FNetPreTrainedModel): - _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"] - - def __init__(self, config): - super().__init__(config) - - self.fnet = FNetModel(config) - self.cls = FNetOnlyMLMHead(config) - - # Initialize weights and apply final processing - self.post_init() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=MaskedLMOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, MaskedLMOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., - config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the - loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.fnet( - input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - prediction_scores = self.cls(sequence_output) - - masked_lm_loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() # -100 index = padding token - masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - - return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states) - - -@add_start_docstrings( - """FNet Model with a `next sentence prediction (classification)` head on top.""", - FNET_START_DOCSTRING, -) -class FNetForNextSentencePrediction(FNetPreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.fnet = FNetModel(config) - self.cls = FNetOnlyNSPHead(config) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - **kwargs, - ) -> Union[Tuple, NextSentencePredictorOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair - (see `input_ids` docstring). Indices should be in `[0, 1]`: - - - 0 indicates sequence B is a continuation of sequence A, - - 1 indicates sequence B is a random sequence. - - Returns: - - Example: - - ```python - >>> from transformers import AutoTokenizer, FNetForNextSentencePrediction - >>> import torch - - >>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base") - >>> model = FNetForNextSentencePrediction.from_pretrained("google/fnet-base") - >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." - >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
- >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") - >>> outputs = model(**encoding, labels=torch.LongTensor([1])) - >>> logits = outputs.logits - >>> assert logits[0, 0] < logits[0, 1] # next sentence was random - ```""" - - if "next_sentence_label" in kwargs: - warnings.warn( - "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" - " `labels` instead.", - FutureWarning, - ) - labels = kwargs.pop("next_sentence_label") - - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.fnet( - input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = outputs[1] - - seq_relationship_scores = self.cls(pooled_output) - - next_sentence_loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) - - if not return_dict: - output = (seq_relationship_scores,) + outputs[2:] - return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output - - return NextSentencePredictorOutput( - loss=next_sentence_loss, - logits=seq_relationship_scores, - hidden_states=outputs.hidden_states, - ) - - -@add_start_docstrings( - """ - FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled - output) e.g. for GLUE tasks. - """, - FNET_START_DOCSTRING, -) -class FNetForSequenceClassification(FNetPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - self.fnet = FNetModel(config) - - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=SequenceClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, SequenceClassifierOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.fnet( - input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = outputs[1] - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - - loss = None - if labels is not None: - if self.config.problem_type is None: - if self.num_labels == 1: - self.config.problem_type = "regression" - elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): - self.config.problem_type = "single_label_classification" - else: - self.config.problem_type = "multi_label_classification" - - if self.config.problem_type == "regression": - loss_fct = MSELoss() - if self.num_labels == 1: - loss = loss_fct(logits.squeeze(), labels.squeeze()) - else: - loss = loss_fct(logits, labels) - elif self.config.problem_type == "single_label_classification": - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - elif self.config.problem_type == "multi_label_classification": - loss_fct = BCEWithLogitsLoss() - loss = loss_fct(logits, labels) - if not return_dict: - output = (logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) - - -@add_start_docstrings( - """ - FNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a - softmax) e.g. for RocStories/SWAG tasks. - """, - FNET_START_DOCSTRING, -) -class FNetForMultipleChoice(FNetPreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.fnet = FNetModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, 1) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=MultipleChoiceModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, MultipleChoiceModelOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., - num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See - `input_ids` above) - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] - - input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None - token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None - position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None - inputs_embeds = ( - inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) - if inputs_embeds is not None - else None - ) - - outputs = self.fnet( - input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = outputs[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - reshaped_logits = logits.view(-1, num_choices) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(reshaped_logits, labels) - - if not return_dict: - output = (reshaped_logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states) - - -@add_start_docstrings( - """ - FNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for - Named-Entity-Recognition (NER) tasks. - """, - FNET_START_DOCSTRING, -) -class FNetForTokenClassification(FNetPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.fnet = FNetModel(config) - - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TokenClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, TokenClassifierOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.fnet( - input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - - sequence_output = self.dropout(sequence_output) - logits = self.classifier(sequence_output) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - # Only keep active parts of the loss - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - - if not return_dict: - output = (logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) - - -@add_start_docstrings( - """ - FNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear - layers on top of the hidden-states output to compute `span start logits` and `span end logits`). - """, - FNET_START_DOCSTRING, -) -class FNetForQuestionAnswering(FNetPreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.num_labels = config.num_labels - - self.fnet = FNetModel(config) - self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=QuestionAnsweringModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - start_positions: Optional[torch.Tensor] = None, - end_positions: Optional[torch.Tensor] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, QuestionAnsweringModelOutput]: - r""" - start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. - end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.fnet( - input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1).contiguous() - end_logits = end_logits.squeeze(-1).contiguous() - - total_loss = None - if start_positions is not None and end_positions is not None: - # If we are on multi-GPU, split add a dimension - if len(start_positions.size()) > 1: - start_positions = start_positions.squeeze(-1) - if len(end_positions.size()) > 1: - end_positions = end_positions.squeeze(-1) - # sometimes the start/end positions are outside our model inputs, we ignore these terms - ignored_index = start_logits.size(1) - start_positions = start_positions.clamp(0, ignored_index) - end_positions = end_positions.clamp(0, ignored_index) - - loss_fct = CrossEntropyLoss(ignore_index=ignored_index) - start_loss = loss_fct(start_logits, start_positions) - end_loss = loss_fct(end_logits, end_positions) - total_loss = (start_loss + end_loss) / 2 - - if not return_dict: - output = (start_logits, end_logits) + outputs[2:] - return ((total_loss,) + output) if total_loss is not None else output - - return QuestionAnsweringModelOutput( - loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states - ) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/focalnet/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/focalnet/__init__.py deleted file mode 100644 index b043a006f9376609c774e84f5376323f48f2cae7..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/focalnet/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import TYPE_CHECKING - -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_focalnet"] = [ - "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", - "FocalNetForImageClassification", - "FocalNetForMaskedImageModeling", - "FocalNetBackbone", - "FocalNetModel", - "FocalNetPreTrainedModel", - ] - -if TYPE_CHECKING: - from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_focalnet import ( - FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, - FocalNetBackbone, - FocalNetForImageClassification, - FocalNetForMaskedImageModeling, - FocalNetModel, - FocalNetPreTrainedModel, - ) - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/m2m_100/tokenization_m2m_100.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/m2m_100/tokenization_m2m_100.py deleted file mode 100644 index 1346af81412add53b2ed07287fd840079992872a..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/m2m_100/tokenization_m2m_100.py +++ /dev/null @@ -1,398 +0,0 @@ -# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
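One structural note on the FocalNet `__init__.py` deleted above: it follows the `transformers` lazy-import convention, where an `_import_structure` dict lists the public symbols, torch-only modeling classes are registered only when `is_torch_available()`, real imports happen under `TYPE_CHECKING`, and at runtime the module is swapped for a `_LazyModule` proxy. A stripped-down sketch of the same pattern for a hypothetical `foo` sub-package follows; the `foo` names are illustrative, and the snippet only makes sense as a sub-package `__init__.py` inside the transformers source tree (hence the relative import).

```python
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Symbols that are always importable (the configuration needs no heavy dependencies).
_import_structure = {"configuration_foo": ["FooConfig"]}

# Modeling classes are registered only when torch is actually installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel", "FooPreTrainedModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_foo import FooConfig
else:
    # ... while at runtime the module becomes a lazy proxy that imports a
    # sub-module only when one of its attributes is first accessed.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```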
-"""Tokenization classes for M2M100.""" -import json -import os -from pathlib import Path -from shutil import copyfile -from typing import Any, Dict, List, Optional, Tuple, Union - -import sentencepiece - -from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer -from ...utils import logging - - -logger = logging.get_logger(__name__) - -SPIECE_UNDERLINE = "▁" - -VOCAB_FILES_NAMES = { - "vocab_file": "vocab.json", - "spm_file": "sentencepiece.bpe.model", - "tokenizer_config_file": "tokenizer_config.json", -} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json", - "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json", - }, - "spm_file": { - "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model", - "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model", - }, - "tokenizer_config_file": { - "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json", - "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json", - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "facebook/m2m100_418M": 1024, -} - -# fmt: off -FAIRSEQ_LANGUAGE_CODES = { - "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"], - "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'] -} -# fmt: on - - -class M2M100Tokenizer(PreTrainedTokenizer): - """ - Construct an M2M100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). - - This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to - this superclass for more information regarding those methods. - - Args: - vocab_file (`str`): - Path to the vocabulary file. - spm_file (`str`): - Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that - contains the vocabulary. - src_lang (`str`, *optional*): - A string representing the source language. - tgt_lang (`str`, *optional*): - A string representing the target language. - eos_token (`str`, *optional*, defaults to `""`): - The end of sequence token. - sep_token (`str`, *optional*, defaults to `""`): - The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for - sequence classification or for a text and a question for question answering. It is also used as the last - token of a sequence built with special tokens. - unk_token (`str`, *optional*, defaults to `""`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - pad_token (`str`, *optional*, defaults to `""`): - The token used for padding, for example when batching sequences of different lengths. 
- language_codes (`str`, *optional*, defaults to `"m2m100"`): - What language codes to use. Should be one of `"m2m100"` or `"wmt21"`. - sp_model_kwargs (`dict`, *optional*): - Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for - SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, - to set: - - - `enable_sampling`: Enable subword regularization. - - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - - - `nbest_size = {0,1}`: No sampling is performed. - - `nbest_size > 1`: samples from the nbest_size results. - - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) - using forward-filtering-and-backward-sampling algorithm. - - - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for - BPE-dropout. - - Examples: - - ```python - >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer - - >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") - >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro") - >>> src_text = " UN Chief Says There Is No Military Solution in Syria" - >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria" - >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") - >>> outputs = model(**model_inputs) # should work - ```""" - - vocab_files_names = VOCAB_FILES_NAMES - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - model_input_names = ["input_ids", "attention_mask"] - - prefix_tokens: List[int] = [] - suffix_tokens: List[int] = [] - - def __init__( - self, - vocab_file, - spm_file, - src_lang=None, - tgt_lang=None, - bos_token="", - eos_token="", - sep_token="", - pad_token="", - unk_token="", - language_codes="m2m100", - sp_model_kwargs: Optional[Dict[str, Any]] = None, - num_madeup_words=8, - **kwargs, - ) -> None: - self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs - - self.language_codes = language_codes - fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes] - self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code} - - additional_special_tokens = kwargs.pop("additional_special_tokens", []) - for lang_code in fairseq_language_code: - token = self.get_lang_token(lang_code) - if token not in additional_special_tokens and lang_code not in str(token) not in self.added_tokens_encoder: - additional_special_tokens.append(token) - - self.vocab_file = vocab_file - self.encoder = load_json(vocab_file) - self.decoder = {v: k for k, v in self.encoder.items()} - self.spm_file = spm_file - self.sp_model = load_spm(spm_file, self.sp_model_kwargs) - - self.encoder_size = len(self.encoder) - - self.lang_token_to_id = { - self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code) - } - self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)} - self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()} - - self._src_lang = src_lang if src_lang is not None else "en" - self.tgt_lang = tgt_lang - self.cur_lang_id = self.get_lang_id(self._src_lang) - - self.num_madeup_words = num_madeup_words - - super().__init__( - src_lang=src_lang, - tgt_lang=tgt_lang, - bos_token=bos_token, - 
eos_token=eos_token, - sep_token=sep_token, - unk_token=unk_token, - pad_token=pad_token, - language_codes=language_codes, - sp_model_kwargs=self.sp_model_kwargs, - additional_special_tokens=additional_special_tokens, - num_madeup_words=num_madeup_words, - **kwargs, - ) - self.set_src_lang_special_tokens(self._src_lang) - - @property - def vocab_size(self) -> int: - return len(self.encoder) - - def get_vocab(self) -> Dict: - vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - @property - def src_lang(self) -> str: - return self._src_lang - - @src_lang.setter - def src_lang(self, new_src_lang: str) -> None: - self._src_lang = new_src_lang - self.set_src_lang_special_tokens(self._src_lang) - - def _tokenize(self, text: str) -> List[str]: - return self.sp_model.encode(text, out_type=str) - - def _convert_token_to_id(self, token): - if token in self.lang_token_to_id: - return self.lang_token_to_id[token] - return self.encoder.get(token, self.encoder[self.unk_token]) - - def _convert_id_to_token(self, index: int) -> str: - """Converts an index (integer) in a token (str) using the decoder.""" - if index in self.id_to_lang_token: - return self.id_to_lang_token[index] - return self.decoder.get(index, self.unk_token) - - def convert_tokens_to_string(self, tokens): - """Converts a sequence of tokens (string) in a single string.""" - current_sub_tokens = [] - out_string = "" - for token in tokens: - # make sure that special tokens are not decoded using sentencepiece model - if token in self.all_special_tokens: - out_string += self.sp_model.decode(current_sub_tokens) + token - current_sub_tokens = [] - else: - current_sub_tokens.append(token) - out_string += self.sp_model.decode(current_sub_tokens) - return out_string.strip() - - def get_special_tokens_mask( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False - ) -> List[int]: - """ - Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` method. - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not the token list is already formatted with special tokens for the model. - - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - - if already_has_special_tokens: - return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) - - prefix_ones = [1] * len(self.prefix_tokens) - suffix_ones = [1] * len(self.suffix_tokens) - if token_ids_1 is None: - return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones - return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. An MBART sequence has the following format, where `X` represents the sequence: - - - `input_ids` (for encoder) `X [eos, src_lang_code]` - - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` - - BOS is never used. 
Pairs of sequences are not the expected use case, but they will be handled without a - separator. - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - if token_ids_1 is None: - return self.prefix_tokens + token_ids_0 + self.suffix_tokens - # We don't expect to process pairs, but leave the pair logic for API consistency - return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens - - def __getstate__(self) -> Dict: - state = self.__dict__.copy() - state["sp_model"] = None - return state - - def __setstate__(self, d: Dict) -> None: - self.__dict__ = d - - # for backward compatibility - if not hasattr(self, "sp_model_kwargs"): - self.sp_model_kwargs = {} - - self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs) - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - save_dir = Path(save_directory) - if not save_dir.is_dir(): - raise OSError(f"{save_directory} should be a directory") - vocab_save_path = save_dir / ( - (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] - ) - spm_save_path = save_dir / ( - (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] - ) - - save_json(self.encoder, vocab_save_path) - - if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file): - copyfile(self.spm_file, spm_save_path) - elif not os.path.isfile(self.spm_file): - with open(spm_save_path, "wb") as fi: - content_spiece_model = self.sp_model.serialized_model_proto() - fi.write(content_spiece_model) - - return (str(vocab_save_path), str(spm_save_path)) - - def prepare_seq2seq_batch( - self, - src_texts: List[str], - src_lang: str = "en", - tgt_texts: Optional[List[str]] = None, - tgt_lang: str = "ro", - **kwargs, - ) -> BatchEncoding: - self.src_lang = src_lang - self.tgt_lang = tgt_lang - self.set_src_lang_special_tokens(self.src_lang) - return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) - - def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): - """Used by translation pipeline, to prepare inputs for the generate function""" - if src_lang is None or tgt_lang is None: - raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") - self.src_lang = src_lang - inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs) - tgt_lang_id = self.get_lang_id(tgt_lang) - inputs["forced_bos_token_id"] = tgt_lang_id - return inputs - - def _switch_to_input_mode(self): - self.set_src_lang_special_tokens(self.src_lang) - - def _switch_to_target_mode(self): - self.set_tgt_lang_special_tokens(self.tgt_lang) - - def set_src_lang_special_tokens(self, src_lang: str) -> None: - """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" - lang_token = self.get_lang_token(src_lang) - self.cur_lang_id = self.lang_token_to_id[lang_token] - self.prefix_tokens = [self.cur_lang_id] - self.suffix_tokens = [self.eos_token_id] - - def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: - """Reset the special tokens to the target language setting. 
No prefix and suffix=[eos, tgt_lang_code].""" - lang_token = self.get_lang_token(tgt_lang) - self.cur_lang_id = self.lang_token_to_id[lang_token] - self.prefix_tokens = [self.cur_lang_id] - self.suffix_tokens = [self.eos_token_id] - - def get_lang_token(self, lang: str) -> str: - return self.lang_code_to_token[lang] - - def get_lang_id(self, lang: str) -> int: - lang_token = self.get_lang_token(lang) - return self.lang_token_to_id[lang_token] - - -def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor: - spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs) - spm.Load(str(path)) - return spm - - -def load_json(path: str) -> Union[Dict, List]: - with open(path, "r") as f: - return json.load(f) - - -def save_json(data, path: str) -> None: - with open(path, "w") as f: - json.dump(data, f, indent=2) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mt5/modeling_tf_mt5.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mt5/modeling_tf_mt5.py deleted file mode 100644 index ba7bd33c344747db537150e3f7d6326f6bb7143f..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/mt5/modeling_tf_mt5.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding=utf-8 -# Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Tensorflow mT5 model.""" - -from ...utils import logging -from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model -from .configuration_mt5 import MT5Config - - -logger = logging.get_logger(__name__) - -_CONFIG_FOR_DOC = "T5Config" - - -class TFMT5Model(TFT5Model): - r""" - This class overrides [`TFT5Model`]. Please check the superclass for the appropriate documentation alongside usage - examples. - - Examples: - - ```python - >>> from transformers import TFMT5Model, AutoTokenizer - - >>> model = TFMT5Model.from_pretrained("google/mt5-small") - >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") - >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." - >>> summary = "Weiter Verhandlung in Syrien." - >>> inputs = tokenizer(article, return_tensors="tf") - >>> labels = tokenizer(text_target=summary, return_tensors="tf") - - >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) - >>> hidden_states = outputs.last_hidden_state - ```""" - model_type = "mt5" - config_class = MT5Config - - -class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration): - r""" - This class overrides [`TFT5ForConditionalGeneration`]. Please check the superclass for the appropriate - documentation alongside usage examples. 
- - Examples: - - ```python - >>> from transformers import TFMT5ForConditionalGeneration, AutoTokenizer - - >>> model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small") - >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") - >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." - >>> summary = "Weiter Verhandlung in Syrien." - >>> inputs = tokenizer(article, text_target=summary, return_tensors="tf") - - >>> outputs = model(**inputs) - >>> loss = outputs.loss - ```""" - - model_type = "mt5" - config_class = MT5Config - - -class TFMT5EncoderModel(TFT5EncoderModel): - r""" - This class overrides [`TFT5EncoderModel`]. Please check the superclass for the appropriate documentation alongside - usage examples. - - Examples: - - ```python - >>> from transformers import TFMT5EncoderModel, AutoTokenizer - - >>> model = TFMT5EncoderModel.from_pretrained("google/mt5-small") - >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") - >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." - >>> input_ids = tokenizer(article, return_tensors="tf").input_ids - >>> outputs = model(input_ids) - >>> hidden_state = outputs.last_hidden_state - ```""" - - model_type = "mt5" - config_class = MT5Config diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/diffusion/diffusion_onnx.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/diffusion/diffusion_onnx.py deleted file mode 100644 index 1c1e80321de162b5233801efa3423739f7f92bdc..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/diffusion/diffusion_onnx.py +++ /dev/null @@ -1,612 +0,0 @@ -from collections import deque -from functools import partial -from inspect import isfunction -import torch.nn.functional as F -import librosa.sequence -import numpy as np -from torch.nn import Conv1d -from torch.nn import Mish -import torch -from torch import nn -from tqdm import tqdm -import math - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def extract(a, t): - return a[t].reshape((1, 1, 1, 1)) - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() - - -def linear_beta_schedule(timesteps, max_beta=0.02): - """ - linear schedule - """ - betas = np.linspace(1e-4, max_beta, timesteps) - return betas - - -def cosine_beta_schedule(timesteps, s=0.008): - """ - cosine schedule - as proposed in https://openreview.net/forum?id=-NEXDKk8gZ - """ - steps = timesteps + 1 - x = np.linspace(0, steps, steps) - alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2 - alphas_cumprod = alphas_cumprod / alphas_cumprod[0] - betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) - return np.clip(betas, a_min=0, a_max=0.999) - - -beta_schedule = { - "cosine": cosine_beta_schedule, - "linear": linear_beta_schedule, -} - - -def extract_1(a, t): - return a[t].reshape((1, 1, 1, 1)) - - -def predict_stage0(noise_pred, noise_pred_prev): - return (noise_pred + noise_pred_prev) / 2 - - -def predict_stage1(noise_pred, noise_list): - return (noise_pred * 3 - - noise_list[-1]) / 2 - - -def predict_stage2(noise_pred, noise_list): - return (noise_pred * 23 - - noise_list[-1] * 16 - + noise_list[-2] * 5) / 12 - - -def predict_stage3(noise_pred, noise_list): - return (noise_pred * 
55 - - noise_list[-1] * 59 - + noise_list[-2] * 37 - - noise_list[-3] * 9) / 24 - - -class SinusoidalPosEmb(nn.Module): - def __init__(self, dim): - super().__init__() - self.dim = dim - self.half_dim = dim // 2 - self.emb = 9.21034037 / (self.half_dim - 1) - self.emb = torch.exp(torch.arange(self.half_dim) * torch.tensor(-self.emb)).unsqueeze(0) - self.emb = self.emb.cpu() - - def forward(self, x): - emb = self.emb * x - emb = torch.cat((emb.sin(), emb.cos()), dim=-1) - return emb - - -class ResidualBlock(nn.Module): - def __init__(self, encoder_hidden, residual_channels, dilation): - super().__init__() - self.residual_channels = residual_channels - self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation) - self.diffusion_projection = nn.Linear(residual_channels, residual_channels) - self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1) - self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1) - - def forward(self, x, conditioner, diffusion_step): - diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1) - conditioner = self.conditioner_projection(conditioner) - y = x + diffusion_step - y = self.dilated_conv(y) + conditioner - - gate, filter_1 = torch.split(y, [self.residual_channels, self.residual_channels], dim=1) - - y = torch.sigmoid(gate) * torch.tanh(filter_1) - y = self.output_projection(y) - - residual, skip = torch.split(y, [self.residual_channels, self.residual_channels], dim=1) - - return (x + residual) / 1.41421356, skip - - -class DiffNet(nn.Module): - def __init__(self, in_dims, n_layers, n_chans, n_hidden): - super().__init__() - self.encoder_hidden = n_hidden - self.residual_layers = n_layers - self.residual_channels = n_chans - self.input_projection = Conv1d(in_dims, self.residual_channels, 1) - self.diffusion_embedding = SinusoidalPosEmb(self.residual_channels) - dim = self.residual_channels - self.mlp = nn.Sequential( - nn.Linear(dim, dim * 4), - Mish(), - nn.Linear(dim * 4, dim) - ) - self.residual_layers = nn.ModuleList([ - ResidualBlock(self.encoder_hidden, self.residual_channels, 1) - for i in range(self.residual_layers) - ]) - self.skip_projection = Conv1d(self.residual_channels, self.residual_channels, 1) - self.output_projection = Conv1d(self.residual_channels, in_dims, 1) - nn.init.zeros_(self.output_projection.weight) - - def forward(self, spec, diffusion_step, cond): - x = spec.squeeze(0) - x = self.input_projection(x) # x [B, residual_channel, T] - x = F.relu(x) - # skip = torch.randn_like(x) - diffusion_step = diffusion_step.float() - diffusion_step = self.diffusion_embedding(diffusion_step) - diffusion_step = self.mlp(diffusion_step) - - x, skip = self.residual_layers[0](x, cond, diffusion_step) - # noinspection PyTypeChecker - for layer in self.residual_layers[1:]: - x, skip_connection = layer.forward(x, cond, diffusion_step) - skip = skip + skip_connection - x = skip / math.sqrt(len(self.residual_layers)) - x = self.skip_projection(x) - x = F.relu(x) - x = self.output_projection(x) # [B, 80, T] - return x.unsqueeze(1) - - -class AfterDiffusion(nn.Module): - def __init__(self, spec_max, spec_min, v_type='a'): - super().__init__() - self.spec_max = spec_max - self.spec_min = spec_min - self.type = v_type - - def forward(self, x): - x = x.squeeze(1).permute(0, 2, 1) - mel_out = (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min - if self.type == 'nsf-hifigan-log10': - mel_out = mel_out * 0.434294 - return mel_out.transpose(2, 1) - - 
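The `AfterDiffusion` module that closes just above simply inverts the mel-spectrogram normalisation used elsewhere in this file (`norm_spec` / `denorm_spec` further down): the denoiser works on values in [-1, 1], and post-processing maps them back to the [spec_min, spec_max] range, with an optional natural-log to log10 rescale (0.434294 is roughly 1/ln 10) for the `nsf-hifigan-log10` vocoder type. A small self-contained sketch of that round trip, using the same default spec_min = -12 and spec_max = 2 that `GaussianDiffusion` takes below:

```python
import torch

spec_min, spec_max = -12.0, 2.0  # defaults taken by GaussianDiffusion below

def norm_spec(x: torch.Tensor) -> torch.Tensor:
    # map mel values from [spec_min, spec_max] into the [-1, 1] range the denoiser sees
    return (x - spec_min) / (spec_max - spec_min) * 2 - 1

def denorm_spec(x: torch.Tensor) -> torch.Tensor:
    # inverse of norm_spec: map [-1, 1] back to [spec_min, spec_max]
    return (x + 1) / 2 * (spec_max - spec_min) + spec_min

mel = torch.rand(1, 100, 128) * (spec_max - spec_min) + spec_min  # fake natural-log mel
assert torch.allclose(denorm_spec(norm_spec(mel)), mel, atol=1e-4)

# for the 'nsf-hifigan-log10' vocoder the denormalised natural-log mel is rescaled
# to base-10 log: log10(x) = ln(x) * (1 / ln(10)) ≈ ln(x) * 0.434294
mel_log10 = denorm_spec(norm_spec(mel)) * 0.434294
```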
-class Pred(nn.Module): - def __init__(self, alphas_cumprod): - super().__init__() - self.alphas_cumprod = alphas_cumprod - - def forward(self, x_1, noise_t, t_1, t_prev): - a_t = extract(self.alphas_cumprod, t_1).cpu() - a_prev = extract(self.alphas_cumprod, t_prev).cpu() - a_t_sq, a_prev_sq = a_t.sqrt().cpu(), a_prev.sqrt().cpu() - x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / ( - a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t) - x_pred = x_1 + x_delta.cpu() - - return x_pred - - -class GaussianDiffusion(nn.Module): - def __init__(self, - out_dims=128, - n_layers=20, - n_chans=384, - n_hidden=256, - timesteps=1000, - k_step=1000, - max_beta=0.02, - spec_min=-12, - spec_max=2): - super().__init__() - self.denoise_fn = DiffNet(out_dims, n_layers, n_chans, n_hidden) - self.out_dims = out_dims - self.mel_bins = out_dims - self.n_hidden = n_hidden - betas = beta_schedule['linear'](timesteps, max_beta=max_beta) - - alphas = 1. - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.k_step = k_step - - self.noise_list = deque(maxlen=4) - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - self.register_buffer('spec_min', torch.FloatTensor([spec_min])[None, None, :out_dims]) - self.register_buffer('spec_max', torch.FloatTensor([spec_max])[None, None, :out_dims]) - self.ad = AfterDiffusion(self.spec_max, self.spec_min) - self.xp = Pred(self.alphas_cumprod) - - def q_mean_variance(self, x_start, t): - mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - variance = extract(1. 
- self.alphas_cumprod, t, x_start.shape) - log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, cond): - noise_pred = self.denoise_fn(x, t, cond=cond) - x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred) - - x_recon.clamp_(-1., 1.) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False): - """ - Use the PLMS method from - [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778). 
- """ - - def get_x_pred(x, noise_t, t): - a_t = extract(self.alphas_cumprod, t) - a_prev = extract(self.alphas_cumprod, torch.max(t - interval, torch.zeros_like(t))) - a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt() - - x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / ( - a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t) - x_pred = x + x_delta - - return x_pred - - noise_list = self.noise_list - noise_pred = self.denoise_fn(x, t, cond=cond) - - if len(noise_list) == 0: - x_pred = get_x_pred(x, noise_pred, t) - noise_pred_prev = self.denoise_fn(x_pred, max(t - interval, 0), cond=cond) - noise_pred_prime = (noise_pred + noise_pred_prev) / 2 - elif len(noise_list) == 1: - noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2 - elif len(noise_list) == 2: - noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12 - else: - noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24 - - x_prev = get_x_pred(x, noise_pred_prime, t) - noise_list.append(noise_pred) - - return x_prev - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return ( - extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise - ) - - def p_losses(self, x_start, t, cond, noise=None, loss_type='l2'): - noise = default(noise, lambda: torch.randn_like(x_start)) - - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - x_recon = self.denoise_fn(x_noisy, t, cond) - - if loss_type == 'l1': - loss = (noise - x_recon).abs().mean() - elif loss_type == 'l2': - loss = F.mse_loss(noise, x_recon) - else: - raise NotImplementedError() - - return loss - - def org_forward(self, - condition, - init_noise=None, - gt_spec=None, - infer=True, - infer_speedup=100, - method='pndm', - k_step=1000, - use_tqdm=True): - """ - conditioning diffusion, use fastspeech2 encoder output as the condition - """ - cond = condition - b, device = condition.shape[0], condition.device - if not infer: - spec = self.norm_spec(gt_spec) - t = torch.randint(0, self.k_step, (b,), device=device).long() - norm_spec = spec.transpose(1, 2)[:, None, :, :] # [B, 1, M, T] - return self.p_losses(norm_spec, t, cond=cond) - else: - shape = (cond.shape[0], 1, self.out_dims, cond.shape[2]) - - if gt_spec is None: - t = self.k_step - if init_noise is None: - x = torch.randn(shape, device=device) - else: - x = init_noise - else: - t = k_step - norm_spec = self.norm_spec(gt_spec) - norm_spec = norm_spec.transpose(1, 2)[:, None, :, :] - x = self.q_sample(x_start=norm_spec, t=torch.tensor([t - 1], device=device).long()) - - if method is not None and infer_speedup > 1: - if method == 'dpm-solver': - from .dpm_solver_pytorch import NoiseScheduleVP, model_wrapper, DPM_Solver - # 1. Define the noise schedule. - noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.betas[:t]) - - # 2. Convert your discrete-time `model` to the continuous-time - # noise prediction model. Here is an example for a diffusion model - # `model` with the noise prediction type ("noise") . - def my_wrapper(fn): - def wrapped(x, t, **kwargs): - ret = fn(x, t, **kwargs) - if use_tqdm: - self.bar.update(1) - return ret - - return wrapped - - model_fn = model_wrapper( - my_wrapper(self.denoise_fn), - noise_schedule, - model_type="noise", # or "x_start" or "v" or "score" - model_kwargs={"cond": cond} - ) - - # 3. 
Define dpm-solver and sample by singlestep DPM-Solver. - # (We recommend singlestep DPM-Solver for unconditional sampling) - # You can adjust the `steps` to balance the computation - # costs and the sample quality. - dpm_solver = DPM_Solver(model_fn, noise_schedule) - - steps = t // infer_speedup - if use_tqdm: - self.bar = tqdm(desc="sample time step", total=steps) - x = dpm_solver.sample( - x, - steps=steps, - order=3, - skip_type="time_uniform", - method="singlestep", - ) - if use_tqdm: - self.bar.close() - elif method == 'pndm': - self.noise_list = deque(maxlen=4) - if use_tqdm: - for i in tqdm( - reversed(range(0, t, infer_speedup)), desc='sample time step', - total=t // infer_speedup, - ): - x = self.p_sample_plms( - x, torch.full((b,), i, device=device, dtype=torch.long), - infer_speedup, cond=cond - ) - else: - for i in reversed(range(0, t, infer_speedup)): - x = self.p_sample_plms( - x, torch.full((b,), i, device=device, dtype=torch.long), - infer_speedup, cond=cond - ) - else: - raise NotImplementedError(method) - else: - if use_tqdm: - for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t): - x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) - else: - for i in reversed(range(0, t)): - x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) - x = x.squeeze(1).transpose(1, 2) # [B, T, M] - return self.denorm_spec(x).transpose(2, 1) - - def norm_spec(self, x): - return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1 - - def denorm_spec(self, x): - return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min - - def get_x_pred(self, x_1, noise_t, t_1, t_prev): - a_t = extract(self.alphas_cumprod, t_1) - a_prev = extract(self.alphas_cumprod, t_prev) - a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt() - x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x_1 - 1 / ( - a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t) - x_pred = x_1 + x_delta - return x_pred - - def OnnxExport(self, project_name=None, init_noise=None, hidden_channels=256, export_denoise=True, export_pred=True, export_after=True): - cond = torch.randn([1, self.n_hidden, 10]).cpu() - if init_noise is None: - x = torch.randn((1, 1, self.mel_bins, cond.shape[2]), dtype=torch.float32).cpu() - else: - x = init_noise - pndms = 100 - - org_y_x = self.org_forward(cond, init_noise=x) - - device = cond.device - n_frames = cond.shape[2] - step_range = torch.arange(0, self.k_step, pndms, dtype=torch.long, device=device).flip(0) - plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device) - noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device) - - ot = step_range[0] - ot_1 = torch.full((1,), ot, device=device, dtype=torch.long) - if export_denoise: - torch.onnx.export( - self.denoise_fn, - (x.cpu(), ot_1.cpu(), cond.cpu()), - f"{project_name}_denoise.onnx", - input_names=["noise", "time", "condition"], - output_names=["noise_pred"], - dynamic_axes={ - "noise": [3], - "condition": [2] - }, - opset_version=16 - ) - - for t in step_range: - t_1 = torch.full((1,), t, device=device, dtype=torch.long) - noise_pred = self.denoise_fn(x, t_1, cond) - t_prev = t_1 - pndms - t_prev = t_prev * (t_prev > 0) - if plms_noise_stage == 0: - if export_pred: - torch.onnx.export( - self.xp, - (x.cpu(), noise_pred.cpu(), t_1.cpu(), t_prev.cpu()), - f"{project_name}_pred.onnx", - input_names=["noise", "noise_pred", "time", "time_prev"], - output_names=["noise_pred_o"], - dynamic_axes={ - 
"noise": [3], - "noise_pred": [3] - }, - opset_version=16 - ) - - x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev) - noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond) - noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev) - - elif plms_noise_stage == 1: - noise_pred_prime = predict_stage1(noise_pred, noise_list) - - elif plms_noise_stage == 2: - noise_pred_prime = predict_stage2(noise_pred, noise_list) - - else: - noise_pred_prime = predict_stage3(noise_pred, noise_list) - - noise_pred = noise_pred.unsqueeze(0) - - if plms_noise_stage < 3: - noise_list = torch.cat((noise_list, noise_pred), dim=0) - plms_noise_stage = plms_noise_stage + 1 - - else: - noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0) - - x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev) - if export_after: - torch.onnx.export( - self.ad, - x.cpu(), - f"{project_name}_after.onnx", - input_names=["x"], - output_names=["mel_out"], - dynamic_axes={ - "x": [3] - }, - opset_version=16 - ) - x = self.ad(x) - - print((x == org_y_x).all()) - return x - - def forward(self, condition=None, init_noise=None, pndms=None, k_step=None): - cond = condition - x = init_noise - - device = cond.device - n_frames = cond.shape[2] - step_range = torch.arange(0, k_step.item(), pndms.item(), dtype=torch.long, device=device).flip(0) - plms_noise_stage = torch.tensor(0, dtype=torch.long, device=device) - noise_list = torch.zeros((0, 1, 1, self.mel_bins, n_frames), device=device) - - ot = step_range[0] - ot_1 = torch.full((1,), ot, device=device, dtype=torch.long) - - for t in step_range: - t_1 = torch.full((1,), t, device=device, dtype=torch.long) - noise_pred = self.denoise_fn(x, t_1, cond) - t_prev = t_1 - pndms - t_prev = t_prev * (t_prev > 0) - if plms_noise_stage == 0: - x_pred = self.get_x_pred(x, noise_pred, t_1, t_prev) - noise_pred_prev = self.denoise_fn(x_pred, t_prev, cond=cond) - noise_pred_prime = predict_stage0(noise_pred, noise_pred_prev) - - elif plms_noise_stage == 1: - noise_pred_prime = predict_stage1(noise_pred, noise_list) - - elif plms_noise_stage == 2: - noise_pred_prime = predict_stage2(noise_pred, noise_list) - - else: - noise_pred_prime = predict_stage3(noise_pred, noise_list) - - noise_pred = noise_pred.unsqueeze(0) - - if plms_noise_stage < 3: - noise_list = torch.cat((noise_list, noise_pred), dim=0) - plms_noise_stage = plms_noise_stage + 1 - - else: - noise_list = torch.cat((noise_list[-2:], noise_pred), dim=0) - - x = self.get_x_pred(x, noise_pred_prime, t_1, t_prev) - x = self.ad(x) - return x diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/inference/__init__.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/hubert/hubert_model_onnx.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/hubert/hubert_model_onnx.py deleted file mode 100644 index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vencoder/hubert/hubert_model_onnx.py +++ /dev/null @@ -1,217 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - 
super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - def forward(self, x): - return self.units(x) - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: 
torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. 
- Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py deleted file mode 100644 index 1c752029b7fc64ec375a55182e5342c9eb48bb33..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py +++ /dev/null @@ -1,23 +0,0 @@ -from detectron2.modeling.meta_arch.fcos import FCOS, FCOSHead - -from .retinanet import model - -model._target_ = FCOS - -del model.anchor_generator -del model.box2box_transform -del model.anchor_matcher -del model.input_format - -# Use P5 instead of C5 to compute P6/P7 -# (Sec 2.2 of https://arxiv.org/abs/2006.09214) -model.backbone.top_block.in_feature = "p5" -model.backbone.top_block.in_channels = 256 - -# New score threshold determined based on sqrt(cls_score * centerness) -model.test_score_thresh = 0.2 -model.test_nms_thresh = 0.6 - -model.head._target_ = FCOSHead -del model.head.num_anchors -model.head.norm = "GN" diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py deleted file mode 100644 index 744d5306f5b0ba4cf508731bd790bad823b520fa..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py +++ /dev/null @@ -1,93 +0,0 @@ -from detectron2.config import LazyCall as L -from detectron2.layers import ShapeSpec -from detectron2.modeling.meta_arch import GeneralizedRCNN -from detectron2.modeling.anchor_generator import DefaultAnchorGenerator -from detectron2.modeling.backbone.fpn import LastLevelMaxPool -from detectron2.modeling.backbone import BasicStem, FPN, ResNet -from detectron2.modeling.box_regression import Box2BoxTransform -from detectron2.modeling.matcher import Matcher -from detectron2.modeling.poolers import ROIPooler -from detectron2.modeling.proposal_generator import RPN, StandardRPNHead -from detectron2.modeling.roi_heads import ( - StandardROIHeads, - FastRCNNOutputLayers, - MaskRCNNConvUpsampleHead, - FastRCNNConvFCHead, -) - -model = L(GeneralizedRCNN)( - backbone=L(FPN)( - bottom_up=L(ResNet)( - stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), - stages=L(ResNet.make_default_stages)( - depth=50, - stride_in_1x1=True, - norm="FrozenBN", - ), - out_features=["res2", "res3", "res4", "res5"], - ), - in_features="${.bottom_up.out_features}", - out_channels=256, - top_block=L(LastLevelMaxPool)(), - ), - proposal_generator=L(RPN)( - in_features=["p2", "p3", "p4", "p5", "p6"], - head=L(StandardRPNHead)(in_channels=256, num_anchors=3), - anchor_generator=L(DefaultAnchorGenerator)( - sizes=[[32], [64], [128], [256], [512]], - aspect_ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64], - offset=0.0, - ), - anchor_matcher=L(Matcher)( - thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True - ), - box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]), - batch_size_per_image=256, - positive_fraction=0.5, - pre_nms_topk=(2000, 1000), - post_nms_topk=(1000, 1000), - nms_thresh=0.7, - ), - 
roi_heads=L(StandardROIHeads)( - num_classes=80, - batch_size_per_image=512, - positive_fraction=0.25, - proposal_matcher=L(Matcher)( - thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False - ), - box_in_features=["p2", "p3", "p4", "p5"], - box_pooler=L(ROIPooler)( - output_size=7, - scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), - sampling_ratio=0, - pooler_type="ROIAlignV2", - ), - box_head=L(FastRCNNConvFCHead)( - input_shape=ShapeSpec(channels=256, height=7, width=7), - conv_dims=[], - fc_dims=[1024, 1024], - ), - box_predictor=L(FastRCNNOutputLayers)( - input_shape=ShapeSpec(channels=1024), - test_score_thresh=0.05, - box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)), - num_classes="${..num_classes}", - ), - mask_in_features=["p2", "p3", "p4", "p5"], - mask_pooler=L(ROIPooler)( - output_size=14, - scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), - sampling_ratio=0, - pooler_type="ROIAlignV2", - ), - mask_head=L(MaskRCNNConvUpsampleHead)( - input_shape=ShapeSpec(channels=256, width=14, height=14), - num_classes="${..num_classes}", - conv_dims=[256, 256, 256, 256, 256], - ), - ), - pixel_mean=[103.530, 116.280, 123.675], - pixel_std=[1.0, 1.0, 1.0], - input_format="BGR", -) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py deleted file mode 100644 index df7a2aedf480ed8dc4aa3645e37420e9b893fae4..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py +++ /dev/null @@ -1,72 +0,0 @@ -import detectron2.data.transforms as T -from detectron2.config.lazy import LazyCall as L -from detectron2.layers.batch_norm import NaiveSyncBatchNorm -from detectron2.solver import WarmupParamScheduler -from fvcore.common.param_scheduler import MultiStepParamScheduler - -from ..common.data.coco import dataloader -from ..common.models.mask_rcnn_fpn import model -from ..common.optim import SGD as optimizer -from ..common.train import train - -# train from scratch -train.init_checkpoint = "" -train.amp.enabled = True -train.ddp.fp16_compression = True -model.backbone.bottom_up.freeze_at = 0 - -# SyncBN -# fmt: off -model.backbone.bottom_up.stem.norm = \ - model.backbone.bottom_up.stages.norm = \ - model.backbone.norm = "SyncBN" - -# Using NaiveSyncBatchNorm becase heads may have empty input. That is not supported by -# torch.nn.SyncBatchNorm. We can remove this after -# https://github.com/pytorch/pytorch/issues/36530 is fixed. 
-model.roi_heads.box_head.conv_norm = \ - model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c, - stats_mode="N") -# fmt: on - -# 2conv in RPN: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950 -model.proposal_generator.head.conv_dims = [-1, -1] - -# 4conv1fc box head -model.roi_heads.box_head.conv_dims = [256, 256, 256, 256] -model.roi_heads.box_head.fc_dims = [1024] - -# resize_and_crop_image in: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950 -image_size = 1024 -dataloader.train.mapper.augmentations = [ - L(T.ResizeScale)( - min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size - ), - L(T.FixedSizeCrop)(crop_size=(image_size, image_size)), - L(T.RandomFlip)(horizontal=True), -] - -# recompute boxes due to cropping -dataloader.train.mapper.recompute_boxes = True - -# larger batch-size. -dataloader.train.total_batch_size = 64 - -# Equivalent to 100 epochs. -# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep -train.max_iter = 184375 - -lr_multiplier = L(WarmupParamScheduler)( - scheduler=L(MultiStepParamScheduler)( - values=[1.0, 0.1, 0.01], - milestones=[163889, 177546], - num_updates=train.max_iter, - ), - warmup_length=500 / train.max_iter, - warmup_factor=0.067, -) - -optimizer.lr = 0.1 -optimizer.weight_decay = 4e-5 diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py deleted file mode 100644 index 82fd3b2d40054573917a445b138d29a6dabfb907..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import os -import pickle -import torch -from fvcore.common.checkpoint import Checkpointer -from torch.nn.parallel import DistributedDataParallel - -import detectron2.utils.comm as comm -from detectron2.utils.file_io import PathManager - -from .c2_model_loading import align_and_update_state_dicts - - -class DetectionCheckpointer(Checkpointer): - """ - Same as :class:`Checkpointer`, but is able to: - 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models. - 2. correctly load checkpoints that are only available on the master worker - """ - - def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables): - is_main_process = comm.is_main_process() - super().__init__( - model, - save_dir, - save_to_disk=is_main_process if save_to_disk is None else save_to_disk, - **checkpointables, - ) - self.path_manager = PathManager - - def load(self, path, *args, **kwargs): - need_sync = False - - if path and isinstance(self.model, DistributedDataParallel): - logger = logging.getLogger(__name__) - path = self.path_manager.get_local_path(path) - has_file = os.path.isfile(path) - all_has_file = comm.all_gather(has_file) - if not all_has_file[0]: - raise OSError(f"File {path} not found on main worker.") - if not all(all_has_file): - logger.warning( - f"Not all workers can read checkpoint {path}. " - "Training may fail to fully resume." 
- ) - # TODO: broadcast the checkpoint file contents from main - # worker, and load from it instead. - need_sync = True - if not has_file: - path = None # don't load if not readable - ret = super().load(path, *args, **kwargs) - - if need_sync: - logger.info("Broadcasting model states from main worker ...") - self.model._sync_params_and_buffers() - return ret - - def _load_file(self, filename): - if filename.endswith(".pkl"): - with PathManager.open(filename, "rb") as f: - data = pickle.load(f, encoding="latin1") - if "model" in data and "__author__" in data: - # file is in Detectron2 model zoo format - self.logger.info("Reading a file from '{}'".format(data["__author__"])) - return data - else: - # assume file is from Caffe2 / Detectron1 model zoo - if "blobs" in data: - # Detection models have "blobs", but ImageNet models don't - data = data["blobs"] - data = {k: v for k, v in data.items() if not k.endswith("_momentum")} - return {"model": data, "__author__": "Caffe2", "matching_heuristics": True} - elif filename.endswith(".pyth"): - # assume file is from pycls; no one else seems to use the ".pyth" extension - with PathManager.open(filename, "rb") as f: - data = torch.load(f) - assert ( - "model_state" in data - ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'." - model_state = { - k: v - for k, v in data["model_state"].items() - if not k.endswith("num_batches_tracked") - } - return {"model": model_state, "__author__": "pycls", "matching_heuristics": True} - - loaded = super()._load_file(filename) # load native pth checkpoint - if "model" not in loaded: - loaded = {"model": loaded} - return loaded - - def _load_model(self, checkpoint): - if checkpoint.get("matching_heuristics", False): - self._convert_ndarray_to_tensor(checkpoint["model"]) - # convert weights by name-matching heuristics - checkpoint["model"] = align_and_update_state_dicts( - self.model.state_dict(), - checkpoint["model"], - c2_conversion=checkpoint.get("__author__", None) == "Caffe2", - ) - # for non-caffe2 models, use standard ways to load it - incompatible = super()._load_model(checkpoint) - - model_buffers = dict(self.model.named_buffers(recurse=False)) - for k in ["pixel_mean", "pixel_std"]: - # Ignore missing key message about pixel_mean/std. - # Though they may be missing in old checkpoints, they will be correctly - # initialized from config anyway. - if k in model_buffers: - try: - incompatible.missing_keys.remove(k) - except ValueError: - pass - for k in incompatible.unexpected_keys[:]: - # Ignore unexpected keys about cell anchors. They exist in old checkpoints - # but now they are non-persistent buffers and will not be in new checkpoints. 
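`_load_file` above dispatches on the checkpoint extension: Detectron/Detectron2 model-zoo `.pkl` pickles are read with `encoding="latin1"` and later matched by name heuristics, pycls `.pyth` files are unwrapped from `model_state`, and anything else is treated as a native torch checkpoint and wrapped as `{"model": ...}`. A minimal round-trip through that last branch, assuming detectron2 is installed (the toy module and file name are made up for illustration):

```python
import torch
import torch.nn as nn
from detectron2.checkpoint import DetectionCheckpointer

# Save a plain state_dict; DetectionCheckpointer wraps it as {"model": ...} on load.
model = nn.Sequential(nn.Linear(4, 4))
torch.save(model.state_dict(), "toy_weights.pth")

checkpointer = DetectionCheckpointer(model, save_dir=".")
checkpointer.load("toy_weights.pth")   # reports missing/unexpected keys, if any
```

Model-zoo `.pkl` files instead go through `align_and_update_state_dicts`, which is why older Caffe2-era weights can still be loaded by name matching.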
- if "anchor_generator.cell_anchors" in k: - incompatible.unexpected_keys.remove(k) - return incompatible diff --git a/spaces/yueranseo/mygpt/modules/pdf_func.py b/spaces/yueranseo/mygpt/modules/pdf_func.py deleted file mode 100644 index 1b1087f2687fd26c8676867dd45189c069dd56a5..0000000000000000000000000000000000000000 --- a/spaces/yueranseo/mygpt/modules/pdf_func.py +++ /dev/null @@ -1,180 +0,0 @@ -from types import SimpleNamespace -import pdfplumber -import logging -from langchain.docstore.document import Document - -def prepare_table_config(crop_page): - """Prepare table查找边界, 要求page为原始page - - From https://github.com/jsvine/pdfplumber/issues/242 - """ - page = crop_page.root_page # root/parent - cs = page.curves + page.edges - def curves_to_edges(): - """See https://github.com/jsvine/pdfplumber/issues/127""" - edges = [] - for c in cs: - edges += pdfplumber.utils.rect_to_edges(c) - return edges - edges = curves_to_edges() - return { - "vertical_strategy": "explicit", - "horizontal_strategy": "explicit", - "explicit_vertical_lines": edges, - "explicit_horizontal_lines": edges, - "intersection_y_tolerance": 10, - } - -def get_text_outside_table(crop_page): - ts = prepare_table_config(crop_page) - if len(ts["explicit_vertical_lines"]) == 0 or len(ts["explicit_horizontal_lines"]) == 0: - return crop_page - - ### Get the bounding boxes of the tables on the page. - bboxes = [table.bbox for table in crop_page.root_page.find_tables(table_settings=ts)] - def not_within_bboxes(obj): - """Check if the object is in any of the table's bbox.""" - def obj_in_bbox(_bbox): - """See https://github.com/jsvine/pdfplumber/blob/stable/pdfplumber/table.py#L404""" - v_mid = (obj["top"] + obj["bottom"]) / 2 - h_mid = (obj["x0"] + obj["x1"]) / 2 - x0, top, x1, bottom = _bbox - return (h_mid >= x0) and (h_mid < x1) and (v_mid >= top) and (v_mid < bottom) - return not any(obj_in_bbox(__bbox) for __bbox in bboxes) - - return crop_page.filter(not_within_bboxes) -# 请使用 LaTeX 表达公式,行内公式以 $ 包裹,行间公式以 $$ 包裹 - -extract_words = lambda page: page.extract_words(keep_blank_chars=True, y_tolerance=0, x_tolerance=1, extra_attrs=["fontname", "size", "object_type"]) -# dict_keys(['text', 'x0', 'x1', 'top', 'doctop', 'bottom', 'upright', 'direction', 'fontname', 'size']) - -def get_title_with_cropped_page(first_page): - title = [] # 处理标题 - x0,top,x1,bottom = first_page.bbox # 获取页面边框 - - for word in extract_words(first_page): - word = SimpleNamespace(**word) - - if word.size >= 14: - title.append(word.text) - title_bottom = word.bottom - elif word.text == "Abstract": # 获取页面abstract - top = word.top - - user_info = [i["text"] for i in extract_words(first_page.within_bbox((x0,title_bottom,x1,top)))] - # 裁剪掉上半部分, within_bbox: full_included; crop: partial_included - return title, user_info, first_page.within_bbox((x0,top,x1,bottom)) - -def get_column_cropped_pages(pages, two_column=True): - new_pages = [] - for page in pages: - if two_column: - left = page.within_bbox((0, 0, page.width/2, page.height),relative=True) - right = page.within_bbox((page.width/2, 0, page.width, page.height), relative=True) - new_pages.append(left) - new_pages.append(right) - else: - new_pages.append(page) - - return new_pages - -def parse_pdf(filename, two_column = True): - level = logging.getLogger().level - if level == logging.getLevelName("DEBUG"): - logging.getLogger().setLevel("INFO") - - with pdfplumber.open(filename) as pdf: - title, user_info, first_page = get_title_with_cropped_page(pdf.pages[0]) - new_pages = get_column_cropped_pages([first_page] + 
pdf.pages[1:], two_column) - - chapters = [] - # tuple (chapter_name, [pageid] (start,stop), chapter_text) - create_chapter = lambda page_start,name_top,name_bottom: SimpleNamespace( - name=[], - name_top=name_top, - name_bottom=name_bottom, - record_chapter_name = True, - - page_start=page_start, - page_stop=None, - - text=[], - ) - cur_chapter = None - - # 按页遍历PDF文档 - for idx, page in enumerate(new_pages): - page = get_text_outside_table(page) - - # 按行遍历页面文本 - for word in extract_words(page): - word = SimpleNamespace(**word) - - # 检查行文本是否以12号字体打印,如果是,则将其作为新章节开始 - if word.size >= 11: # 出现chapter name - if cur_chapter is None: - cur_chapter = create_chapter(page.page_number, word.top, word.bottom) - elif not cur_chapter.record_chapter_name or (cur_chapter.name_bottom != cur_chapter.name_bottom and cur_chapter.name_top != cur_chapter.name_top): - # 不再继续写chapter name - cur_chapter.page_stop = page.page_number # stop id - chapters.append(cur_chapter) - # 重置当前chapter信息 - cur_chapter = create_chapter(page.page_number, word.top, word.bottom) - - # print(word.size, word.top, word.bottom, word.text) - cur_chapter.name.append(word.text) - else: - cur_chapter.record_chapter_name = False # chapter name 结束 - cur_chapter.text.append(word.text) - else: - # 处理最后一个章节 - cur_chapter.page_stop = page.page_number # stop id - chapters.append(cur_chapter) - - for i in chapters: - logging.info(f"section: {i.name} pages:{i.page_start, i.page_stop} word-count:{len(i.text)}") - logging.debug(" ".join(i.text)) - - title = " ".join(title) - user_info = " ".join(user_info) - text = f"Article Title: {title}, Information:{user_info}\n" - for idx, chapter in enumerate(chapters): - chapter.name = " ".join(chapter.name) - text += f"The {idx}th Chapter {chapter.name}: " + " ".join(chapter.text) + "\n" - - logging.getLogger().setLevel(level) - return Document(page_content=text, metadata={"title": title}) - -BASE_POINTS = """ -1. Who are the authors? -2. What is the process of the proposed method? -3. What is the performance of the proposed method? Please note down its performance metrics. -4. What are the baseline models and their performances? Please note down these baseline methods. -5. What dataset did this paper use? -""" - -READING_PROMPT = """ -You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n -Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n -When you are reading, You need to focus on these key points:{} -""" - -READING_PROMT_V2 = """ -You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n -Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n -When you are reading, You need to focus on these key points:{}, - -And You need to generate a brief but informative title for this part. -Your return format: -- title: '...' -- summary: '...' -""" - -SUMMARY_PROMPT = "You are a researcher helper bot. Now you need to read the summaries of a research paper." 
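`get_text_outside_table` above keeps only the words whose midpoints fall outside every detected table. Here is a standalone pdfplumber sketch of the same idea; it is not part of `pdf_func.py`, `sample.pdf` is a placeholder, and it relies on pdfplumber's default table detection rather than the explicit-edge settings built by `prepare_table_config`.

```python
import pdfplumber

def text_without_tables(page):
    # Bounding boxes of every table pdfplumber finds on the page.
    bboxes = [table.bbox for table in page.find_tables()]

    def outside_tables(obj):
        # Keep an object only if its midpoint is not inside any table bbox.
        v_mid = (obj["top"] + obj["bottom"]) / 2
        h_mid = (obj["x0"] + obj["x1"]) / 2
        return not any(
            x0 <= h_mid < x1 and top <= v_mid < bottom
            for (x0, top, x1, bottom) in bboxes
        )

    return page.filter(outside_tables).extract_text() or ""

with pdfplumber.open("sample.pdf") as pdf:
    print(text_without_tables(pdf.pages[0]))
```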
- - -if __name__ == '__main__': - # Test code - z = parse_pdf("./build/test.pdf") - print(z["user_info"]) - print(z["title"]) \ No newline at end of file diff --git a/spaces/zhanpj/ChatGPT/Dockerfile b/spaces/zhanpj/ChatGPT/Dockerfile deleted file mode 100644 index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000 --- a/spaces/zhanpj/ChatGPT/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -RUN pip install --user -r requirements.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . /app -WORKDIR /app -ENV my_api_key empty -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/zhtet/RegBotBeta/pages/llama_custom_demo.py b/spaces/zhtet/RegBotBeta/pages/llama_custom_demo.py deleted file mode 100644 index c32298dfd968c1d1cc9150686cfbace7bdd0b8cc..0000000000000000000000000000000000000000 --- a/spaces/zhtet/RegBotBeta/pages/llama_custom_demo.py +++ /dev/null @@ -1,26 +0,0 @@ -import os - -import openai -import streamlit as st - -from models.llamaCustom import LlamaCustom -from utils.chatbox import chatbox - -st.set_page_config(page_title="Llama", page_icon="🦙") - -st.subheader("Llama Index with Custom LLM Demo") - -if "messages" not in st.session_state: - st.session_state.messages = [] - -if "openai_api_key" not in st.session_state: - st.info("Enter your openai key to access the chatbot.") -else: - option = st.selectbox( - label="Select your model:", options=("bigscience/bloom-560m",) - ) - - # with st.spinner("Initializing vector index"): - model = LlamaCustom(model_name=option) - - chatbox("llama_custom", model) diff --git a/spaces/zhuolisam/resume-ranker/core.py b/spaces/zhuolisam/resume-ranker/core.py deleted file mode 100644 index 6bcbbab645a8530179c39f4cfe60cf5cd71cc39b..0000000000000000000000000000000000000000 --- a/spaces/zhuolisam/resume-ranker/core.py +++ /dev/null @@ -1,37 +0,0 @@ -from embedding import embedding -from preprocessing import preprocess -from sklearn.metrics.pairwise import cosine_similarity -import numpy as np -import streamlit as st - -def pipeline(input_doc:str , ori_documents, embedding_type='bert'): - documents = np.array([doc['content'] for doc in ori_documents]) - documents = np.insert(documents, 0, input_doc) - # st.write(documents) - preprocessed_documents = preprocess(documents) - # st.write(preprocessed_documents) - print("Encoding with BERT...") - documents_vectors = embedding(preprocessed_documents, embedding=embedding_type) - print("Encoding finished") - - #compute cosine similarity - pairwise = cosine_similarity(documents_vectors) - - #only retain useful information - pairwise = pairwise[0,1:] - sorted_idx = np.argsort(pairwise)[::-1] - result_pairwise = pairwise[sorted_idx] - - results = [] - print('Resume ranking:') - for idx in sorted_idx: - single_result = { - 'rank': idx, - 'name': ori_documents[idx]['name'], - 'similarity': pairwise[idx].item() - } - results.append(single_result) - print(f'Resume of candidite {idx}') - print(f'Cosine Similarity: {pairwise[idx]}\n') - - return results, result_pairwise \ No newline at end of file diff --git a/spaces/zomehwh/sovits-goldship/hubert/hubert_model.py b/spaces/zomehwh/sovits-goldship/hubert/hubert_model.py deleted file mode 100644 index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000 --- 
a/spaces/zomehwh/sovits-goldship/hubert/hubert_model.py +++ /dev/null @@ -1,222 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = 
nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. 
- Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/zomehwh/sovits-tannhauser/vdecoder/hifigan/models.py b/spaces/zomehwh/sovits-tannhauser/vdecoder/hifigan/models.py deleted file mode 100644 index 9747301f350bb269e62601017fe4633ce271b27e..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-tannhauser/vdecoder/hifigan/models.py +++ /dev/null @@ -1,503 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -def padDiff(x): - return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0) - -class 
SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. 
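`_f02sine` above builds a sine whose frequency follows the F0 contour by accumulating the per-sample phase increment `f0 / sampling_rate`; the `% 1` wrap, the random initial phase, and the `cumsum_shift` bookkeeping only guard against numerical overflow and phase alignment. The following toy, self-contained illustration shows that core idea with made-up values and without the overflow/initial-phase handling.

```python
import math
import torch

sample_rate = 16000
f0 = torch.linspace(100.0, 200.0, sample_rate)   # 1 s sweep from 100 Hz to 200 Hz

# Instantaneous phase increment per sample, in cycles; its cumulative sum is the phase.
phase = torch.cumsum(f0 / sample_rate, dim=0)
fundamental = torch.sin(2 * math.pi * phase)

# Overtones are produced the same way from integer multiples of F0, which is what
# SineGen does with its (harmonic_num + 1) channels.
second_harmonic = torch.sin(2 * math.pi * torch.cumsum(2 * f0 / sample_rate, dim=0))
print(fundamental.shape, second_harmonic.shape)   # torch.Size([16000]) each
```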
- i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in 
enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is 
not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses
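The three helpers above implement the least-squares GAN objective plus a feature-matching term: `discriminator_loss` pushes real outputs toward 1 and fake outputs toward 0, while `generator_loss` pushes fake outputs back toward 1. A standalone toy check of that behaviour (illustrative numbers only, not the repo's training loop):

```python
import torch

# One tensor per sub-discriminator, as MultiPeriodDiscriminator / MultiScaleDiscriminator return.
disc_real = [torch.tensor([0.9, 1.1]), torch.tensor([0.8])]
disc_fake = [torch.tensor([0.1, -0.2]), torch.tensor([0.3])]

# Same formulas as discriminator_loss / generator_loss above.
d_loss = sum(torch.mean((1 - dr) ** 2) + torch.mean(df ** 2)
             for dr, df in zip(disc_real, disc_fake))
g_loss = sum(torch.mean((1 - df) ** 2) for df in disc_fake)

print(f"discriminator loss: {d_loss.item():.3f}")   # 0.165 -- D already separates real/fake
print(f"generator loss: {g_loss.item():.3f}")       # 1.615 -- G must move fakes toward 1
```

In HiFi-GAN-style training these terms are combined with `feature_loss` over the discriminator feature maps and a mel-spectrogram L1 term: the generator is optimized on the sum, while the discriminators are optimized on `discriminator_loss` alone.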